From 9412729ca9968d05d12051b8e8542e2504ba2f6c Mon Sep 17 00:00:00 2001 From: Steven Date: Sun, 19 Jun 2022 01:16:50 -0700 Subject: [PATCH 001/182] Fix Conv2dTranspose bias Conv2dTranspose defaults to have use_bias = true but currently throws a not implemented exception when the parameter is true. --- src/TensorFlowNET.Keras/Layers/LayersApi.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index aa4f416f6..548e3ff95 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -235,7 +235,7 @@ public Conv2DTranspose Conv2DTranspose(int filters, string data_format = null, Shape dilation_rate = null, string activation = null, - bool use_bias = true, + bool use_bias = false, string kernel_initializer = null, string bias_initializer = null, string kernel_regularizer = null, From 93cd2b66a6817f5c16806ebe84537e893f5ced49 Mon Sep 17 00:00:00 2001 From: Kevin Hjelden Date: Wed, 10 May 2023 12:58:38 -0700 Subject: [PATCH 002/182] fix: predict with multiple outputs --- src/TensorFlowNET.Keras/Engine/Model.Predict.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Keras/Engine/Model.Predict.cs b/src/TensorFlowNET.Keras/Engine/Model.Predict.cs index 984bcb5dc..fc8d784ca 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Predict.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Predict.cs @@ -84,7 +84,7 @@ Tensors PredictInternal(DataHandler data_handler, int verbose) Steps = data_handler.Inferredsteps }); - Tensor batch_outputs = null; + Tensors batch_outputs = null; _predict_counter.assign(0); callbacks.on_predict_begin(); foreach (var (epoch, iterator) in data_handler.enumerate_epochs()) @@ -95,7 +95,7 @@ Tensors PredictInternal(DataHandler data_handler, int verbose) var tmp_batch_outputs = run_predict_step(iterator); if (batch_outputs == null) { - batch_outputs = tmp_batch_outputs[0]; + batch_outputs = tmp_batch_outputs; } else { From 36b19df42d0e7266f1bace217b9d619ed16a45c0 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Sun, 7 May 2023 03:51:11 +0800 Subject: [PATCH 003/182] feat: add code generator of ops. 
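This adds a Tensorflow.CodeGen console project that reads the OpDef list from
ops.pbtxt, uses the Python gen_*_ops.py files to decide which generated class
each op belongs to, and emits one C# wrapper per op with an eager fast path,
an eager fallback, and a graph-mode _apply_op_helper path.

A minimal sketch of the rough shape of an emitted wrapper, assuming a
placeholder op "SomeOp" with a single Tensor input and no attrs (illustrative
only, not actual generator output):

    public static Tensor some_op(Tensor input, string? name = null)
    {
        var _ctx = tf.Context;
        if (_ctx.executing_eagerly())
        {
            try
            {
                // Eager fast path: dispatch straight to the eager runner.
                var _fast_path_result = tf.Runner.TFE_FastPathExecute(
                    new FastPathOpExecInfo(_ctx, "SomeOp", name, input));
                return _fast_path_result[0];
            }
            catch (Exception) { }
            // Eager fallback path.
            return some_op_eager_fallback(input, name: name, ctx: _ctx);
        }
        // Graph mode: build the op through the op def library.
        Dictionary<string, object> keywords = new();
        keywords["input"] = input;
        var _op = tf.OpDefLib._apply_op_helper("SomeOp", name, keywords);
        var _result = _op.outputs;
        if (_execute.must_record_gradient())
        {
            object[] _attrs = new object[] { };
            _execute.record_gradient("SomeOp", _op.inputs, _attrs, _result);
        }
        return _result[0];
    }
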
--- TensorFlow.NET.sln | 41 ++ Tensorflow.CodeGen/FunctionGenerator.cs | 550 +++++++++++++++++++ Tensorflow.CodeGen/GenOpsWriter.cs | 80 +++ Tensorflow.CodeGen/OpClassifier.cs | 39 ++ Tensorflow.CodeGen/Program.cs | 12 + Tensorflow.CodeGen/Tensorflow.CodeGen.csproj | 18 + Tensorflow.CodeGen/Utils.cs | 46 ++ 7 files changed, 786 insertions(+) create mode 100644 Tensorflow.CodeGen/FunctionGenerator.cs create mode 100644 Tensorflow.CodeGen/GenOpsWriter.cs create mode 100644 Tensorflow.CodeGen/OpClassifier.cs create mode 100644 Tensorflow.CodeGen/Program.cs create mode 100644 Tensorflow.CodeGen/Tensorflow.CodeGen.csproj create mode 100644 Tensorflow.CodeGen/Utils.cs diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln index 0c7d6e3c2..8d5488146 100644 --- a/TensorFlow.NET.sln +++ b/TensorFlow.NET.sln @@ -35,6 +35,10 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "helpers", "helpers", "{E1A5 EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest.RedistHolder", "helpers\Tensorflow.UnitTest.RedistHolder\Tensorflow.UnitTest.RedistHolder.csproj", "{62D543A2-8846-45A3-829B-5754B094A8E2}" EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.CodeGen", "Tensorflow.CodeGen\Tensorflow.CodeGen.csproj", "{BADBB104-2F03-4824-A249-803A871D8122}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "protobuf.Text", "..\protobuf.Text\src\protobuf.Text\protobuf.Text.csproj", "{151B3A8A-8576-4190-BD58-F42944A49718}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -282,6 +286,42 @@ Global {62D543A2-8846-45A3-829B-5754B094A8E2}.Release|x64.Build.0 = Release|Any CPU {62D543A2-8846-45A3-829B-5754B094A8E2}.Release|x86.ActiveCfg = Release|Any CPU {62D543A2-8846-45A3-829B-5754B094A8E2}.Release|x86.Build.0 = Release|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Debug|Any CPU.Build.0 = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Debug|x64.ActiveCfg = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Debug|x64.Build.0 = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Debug|x86.ActiveCfg = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Debug|x86.Build.0 = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.GPU|Any CPU.Build.0 = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.GPU|x64.ActiveCfg = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.GPU|x64.Build.0 = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.GPU|x86.ActiveCfg = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.GPU|x86.Build.0 = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Release|Any CPU.ActiveCfg = Release|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Release|Any CPU.Build.0 = Release|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Release|x64.ActiveCfg = Release|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Release|x64.Build.0 = Release|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Release|x86.ActiveCfg = Release|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Release|x86.Build.0 = Release|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|Any CPU.Build.0 = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x64.Build.0 = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x86.ActiveCfg = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x86.Build.0 = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|Any CPU.Build.0 = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x64.ActiveCfg = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x64.Build.0 = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x86.ActiveCfg = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x86.Build.0 = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Release|Any CPU.ActiveCfg = Release|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Release|Any CPU.Build.0 = Release|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x64.ActiveCfg = Release|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x64.Build.0 = Release|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x86.ActiveCfg = Release|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -300,6 +340,7 @@ Global {9738D16A-CFA0-405C-A7DF-D3D203B0CB18} = {01A1787F-A9BE-4221-84E8-6360DD010AB6} {7DEA8760-E401-4872-81F3-405F185A13A0} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD} {62D543A2-8846-45A3-829B-5754B094A8E2} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} + {BADBB104-2F03-4824-A249-803A871D8122} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A} diff --git a/Tensorflow.CodeGen/FunctionGenerator.cs b/Tensorflow.CodeGen/FunctionGenerator.cs new file mode 100644 index 000000000..d45203072 --- /dev/null +++ b/Tensorflow.CodeGen/FunctionGenerator.cs @@ -0,0 +1,550 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Reflection.Metadata.Ecma335; +using System.Text; +using System.Threading.Tasks; +using Microsoft.CodeAnalysis.CSharp; + +namespace Tensorflow.CodeGen +{ + public class FunctionGenerator + { + public void AppendFunction(OpDef op, StringBuilder sb) + { + // TODO: add descriptions + sb.Append("public static "); + int outputArgsCount = op.OutputArg.Count; + if (outputArgsCount > 1) + { + sb.Append("Tensor[] "); + } + else if (outputArgsCount == 1) + { + sb.Append("Tensor "); + } + else + { + sb.Append("Operation "); + } + string funcName = Utils.ConvertToUnderscore(op.Name); + var token = SyntaxFactory.ParseToken(funcName); + if (token.IsKeyword()) + { + funcName = $"_{funcName}"; + } + sb.Append($" {funcName}("); + + // define args + AppendArgs(op, sb); + sb.Append(")\n{\n"); + + // begin to write main body + sb.AppendLine("var _ctx = tf.Context;"); + sb.AppendLine("if(_ctx.executing_eagerly()){"); + + if(HasRefArgs(op)) + { + var possibleRefArg = op.InputArg.FirstOrDefault(x => x.IsRef, null); + sb.AppendLine($"throw new RuntimeError(\"{funcName} op does not support eager execution. 
Arg {possibleRefArg.Name} is a ref.\");"); + } + else + { + sb.Append("try\n{\n"); + + AppendFastPathExecute(op, sb); + if (outputArgsCount == 0) + { + sb.AppendLine("return null;"); + } + else if (outputArgsCount == 1) + { + sb.AppendLine("return _fast_path_result[0];"); + } + else + { + sb.AppendLine("return _fast_path_result;"); + } + + sb.AppendLine("}"); // try + + sb.Append("catch(Exception)\n{\n"); + sb.AppendLine("}"); // catch + + sb.Append("try\n{\n"); + AppendEagerFallbackCall(op, sb); + sb.AppendLine("}"); // try + + sb.Append("catch(Exception)\n{\n"); + sb.AppendLine("}"); // catch + } + + sb.AppendLine("}"); // if + + // begin to use op helper. + AppendOpHelperCall(op, sb); + sb.AppendLine("var _result = _op.outputs;"); + + // check if it needs to record gradient. + sb.Append("if(_execute.must_record_gradient())\n{\n"); + sb.Append("object[] _attrs = new object[]{"); + foreach (var attr in op.Attr) + { + string attrRealName = attr.Name; + if (SyntaxFactory.ParseToken(attrRealName).IsKeyword()) + { + attrRealName += "_"; + } + if (attr.Type == "type") + { + sb.Append($"\"{attr.Name}\", _op._get_attr_type(\"{attrRealName}\"), "); + } + else if (attr.Type == "int") + { + sb.Append($"\"{attr.Name}\", _op._get_attr_int(\"{attrRealName}\"), "); + } + else if (attr.Type == "bool") + { + sb.Append($"\"{attr.Name}\", _op._get_attr_bool(\"{attrRealName}\"), "); + } + else + { + sb.Append($"\"{attr.Name}\", _op.get_attr(\"{attr.Name}\"), "); + } + } + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') + { + sb.Remove(sb.Length - 2, 2); + } + sb.Append("};\n"); + sb.AppendLine($"_execute.record_gradient(\"{op.Name}\", _op.inputs, _attrs, _result);"); + + sb.AppendLine("}"); // if + + if (outputArgsCount == 0) + { + sb.AppendLine("return _op;"); + } + else if (outputArgsCount == 1) + { + sb.AppendLine("return _result[0];"); + } + else + { + sb.AppendLine("return _result;"); + } + sb.AppendLine("}"); // body + + sb.AppendLine(); + + AppendEagerFallbackDefinition(op, sb); + } + + public void AppendArgs(OpDef op, StringBuilder sb) + { + foreach (var arg in op.InputArg) + { + string argName = arg.Name; + var token = SyntaxFactory.ParseToken(argName); + if (token.IsKeyword()) + { + argName = $"{argName}_"; + } + if (!string.IsNullOrEmpty(arg.NumberAttr)) + { + sb.Append($"Tensors {argName}, "); + } + else + { + sb.Append($"Tensor {argName}, "); + } + } + var attrValueDic = GetAttrsDefaultValue(op); + foreach (var (key, (typeStr, value)) in attrValueDic) + { + var token = SyntaxFactory.ParseToken(key); + string realKey = key; + if (token.IsKeyword()) + { + realKey += "_"; + } + if (value != "NOVALUE") + { + sb.Append($"{typeStr} {realKey} = {value}, "); + } + else + { + sb.Append($"{typeStr} {realKey}, "); + } + } + sb.Append($"string? 
name = null"); + } + + public void AppendFastPathExecute(OpDef op, StringBuilder sb) + { + sb.Append($"var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, \"{op.Name}\", name, "); + foreach (var arg in op.InputArg) + { + string attrArgName = arg.Name; + if (SyntaxFactory.ParseToken(attrArgName).IsKeyword()) + { + attrArgName += "_"; + } + sb.Append($"{attrArgName}, "); + } + var attrValueDic = GetAttrsDefaultValue(op); + foreach (var (key, _) in attrValueDic) + { + sb.Append($"\"{key}\", {key}, "); + } + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') + { + sb.Remove(sb.Length - 2, 2); + } + sb.Append("));\n"); + } + + public void AppendEagerFallbackCall(OpDef op, StringBuilder sb) + { + string funcName = $"{Utils.ConvertToUnderscore(op.Name)}_eager_fallback"; + sb.Append($"return {funcName}("); + foreach (var arg in op.InputArg) + { + string inputArgRealName = arg.Name; + if (SyntaxFactory.ParseToken(inputArgRealName).IsKeyword()) + { + inputArgRealName += "_"; + } + sb.Append($"{inputArgRealName}, "); + } + var attrValueDic = GetAttrsDefaultValue(op); + foreach (var (key, _) in attrValueDic) + { + string keyRealName = key; + if (SyntaxFactory.ParseToken(keyRealName).IsKeyword()) + { + keyRealName += "_"; + } + sb.Append($"{key}: {keyRealName}, "); + } + sb.Append("name: name, ctx: _ctx);\n"); + } + + public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) + { + sb.Append("public static Tensor"); + int outputArgsCount = op.OutputArg.Count; + if (outputArgsCount > 1) + { + sb.Append("[]"); + } + string opName = op.Name; + string funcName = Utils.ConvertToUnderscore(op.Name); + sb.Append($" {funcName}_eager_fallback("); + AppendFallBackFunctionArgs(op, sb); + sb.Append(")\n{\n"); + + var possibleRefArg = op.InputArg.FirstOrDefault(x => x.IsRef, null); + if (possibleRefArg is not null) + { + sb.AppendLine($"throw new RuntimeError($\"{funcName} op does not support eager execution." 
+ + $" Arg '{possibleRefArg.Name}' is a ref.\");"); + sb.AppendLine("}"); // body + return; + } + + sb.Append("Tensor[] _inputs_flat = new Tensor[]{"); + foreach (var arg in op.InputArg) + { + string realArgName = arg.Name; + if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + { + realArgName = $"{realArgName}_"; + } + sb.Append($"{realArgName}, "); + } + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') + { + sb.Remove(sb.Length - 2, 2); + } + sb.Append("};\n"); + + sb.Append("object[] _attrs = new object[]{"); + var attrValueDic = GetAttrsDefaultValue(op); + foreach (var attr in op.Attr) + { + if (attr.Type == "type") + { + bool found = false; + foreach (var arg in op.InputArg) + { + string realArgName = arg.Name; + if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + { + realArgName = $"{realArgName}_"; + } + if (arg.TypeAttr == attr.Name) + { + sb.Append($"\"{attr.Name}\", {realArgName}.dtype, "); + found = true; + break; + } + } + if (!found) + { + if (attr.Name.StartsWith("T") && attr.Name.Length > 1) + { + string paramName = attr.Name.Substring(1); + if (SyntaxFactory.ParseToken(paramName).IsKeyword()) + { + paramName = $"{paramName}_"; + } + sb.Append($"\"{attr.Name}\", {paramName}.dtype, "); + } + else + { + string attrRealName = attr.Name; + if (SyntaxFactory.ParseToken(attrRealName).IsKeyword()) + { + attrRealName = $"{attrRealName}_"; + } + sb.Append($"\"{attr.Name}\", {attrRealName}, "); + } + } + } + else if(attr.Type == "int" && (op.InputArg.Any(x => x.NumberAttr == attr.Name) || op.OutputArg.Any(x => x.NumberAttr == attr.Name))) + { + bool found = false; + foreach (var arg in op.InputArg) + { + string realArgName = arg.Name; + if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + { + realArgName = $"{realArgName}_"; + } + if (arg.NumberAttr == attr.Name) + { + sb.Append($"\"{attr.Name}\", {realArgName}.Length, "); + found = true; + break; + } + } + } + else + { + sb.Append($"\"{attr.Name}\", {attr.Name}, "); + } + } + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') + { + sb.Remove(sb.Length - 2, 2); + } + sb.Append("};\n"); + + sb.AppendLine($"var _result = _execute.execute(\"{op.Name}\", {outputArgsCount}, inputs: _inputs_flat, " + + $"attrs: _attrs, ctx: ctx, name: name);"); + + sb.Append("if(_execute.must_record_gradient())\n{\n"); + + sb.AppendLine($"_execute.record_gradient(\"{op.Name}\", _inputs_flat, _attrs, _result);"); + + sb.AppendLine("}"); // if + + if (outputArgsCount == 0) + { + sb.AppendLine("return null;"); + } + else if (outputArgsCount == 1) + { + sb.AppendLine("return _result[0];"); + } + else + { + sb.AppendLine("return _result;"); + } + + sb.AppendLine("}"); // body + } + + public void AppendFallBackFunctionArgs(OpDef op, StringBuilder sb) + { + foreach (var arg in op.InputArg) + { + string argName = arg.Name; + var token = SyntaxFactory.ParseToken(argName); + if (token.IsKeyword()) + { + argName = $"{argName}_"; + } + if (!string.IsNullOrEmpty(arg.NumberAttr)) + { + sb.Append($"Tensors {argName}, "); + } + else + { + sb.Append($"Tensor {argName}, "); + } + } + var attrValueDic = GetAttrsDefaultValue(op); + foreach (var (key, (typeStr, _)) in attrValueDic) + { + var token = SyntaxFactory.ParseToken(key); + string realKey = key; + if (token.IsKeyword()) + { + realKey += "_"; + } + sb.Append($"{typeStr} {realKey}, "); + } + sb.Append($"string name, Context ctx"); + } + + public void AppendOpHelperCall(OpDef op, StringBuilder sb) + { + sb.AppendLine("Dictionary keywords = new();"); + foreach (var arg in op.InputArg) + { + 
string realArgName = arg.Name; + if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + { + realArgName += "_"; + } + sb.AppendLine($"keywords[\"{arg.Name}\"] = {realArgName};"); + } + var attrValueDic = GetAttrsDefaultValue(op); + foreach (var (key, _) in attrValueDic) + { + sb.Append($"keywords[\"{key}\"] = {key};"); + } + sb.AppendLine($"var _op = tf.OpDefLib._apply_op_helper(\"{op.Name}\", name, keywords);"); + } + + // key, (type string, default value) + public Dictionary GetAttrsDefaultValue(OpDef op) + { + Dictionary dic = new(); + foreach (var attr in op.Attr) + { + if (attr.Type == "type") + { + bool found = op.InputArg.Any(x => x.TypeAttr == attr.Name); + if (!found) + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) + { + string name = Enum.GetName(typeof(TF_DataType), attr.DefaultValue.Type.as_tf_dtype()); + string enumPath = typeof(TF_DataType).Name + "." + name; + dic[attr.Name] = ("TF_DataType", enumPath); + } + else + { + dic[attr.Name] = ("TF_DataType", "NOVALUE"); + } + } + } + else if (attr.Type == "int") + { + if(op.InputArg.Any(x => x.NumberAttr == attr.Name) || op.OutputArg.Any(x => x.NumberAttr == attr.Name)) + { + continue; + } + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.I) + { + dic[attr.Name] = ("int", attr.DefaultValue.I.ToString()); + } + else + { + dic[attr.Name] = ("int", "0"); + } + } + else if (attr.Type == "float") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.F) + { + dic[attr.Name] = ("float", attr.DefaultValue.F.ToString() + "f"); + } + else + { + dic[attr.Name] = ("float", "NOVALUE"); + } + } + else if (attr.Type == "string") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) + { + dic[attr.Name] = ("string", $"\"{attr.DefaultValue.S.ToStringUtf8()}\""); + } + else + { + dic[attr.Name] = ("string", "NOVALUE"); + } + } + else if (attr.Type == "bool") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.B) + { + dic[attr.Name] = ("bool", attr.DefaultValue.B.ToString().ToLower()); + } + else + { + dic[attr.Name] = ("bool", "NOVALUE"); + } + } + else if (attr.Type == "shape") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Shape) + { + dic[attr.Name] = ("Shape", $"null"); + } + else + { + dic[attr.Name] = ("Shape", "NOVALUE"); + } + } + else if (attr.Type == "list(type)") + { + dic[attr.Name] = ("TF_DataType[]", "NOVALUE"); + } + else if (attr.Type == "list(shape)") + { + dic[attr.Name] = ("Shape[]", "NOVALUE"); + } + else if (attr.Type == "list(string)") + { + dic[attr.Name] = ("string[]", "NOVALUE"); + } + else if (attr.Type == "list(int)") + { + dic[attr.Name] = ("int[]", "NOVALUE"); + } + else if (attr.Type == "list(float)") + { + dic[attr.Name] = ("float[]", "NOVALUE"); + } + else if (attr.Type == "func") + { + dic[attr.Name] = ("Func", "NOVALUE"); + } + else if (attr.Type == "list(func)") + { + dic[attr.Name] = ("Func[]", "NOVALUE"); + } + else if (attr.Type == "tensor") + { + dic[attr.Name] = ("TensorProto", "NOVALUE"); + } + else + { + throw new NotImplementedException(); + } + } + return dic; + } + + private static bool HasRefArgs(OpDef op) + { + return op.InputArg.Any(x => x.IsRef); + } + } +} diff --git a/Tensorflow.CodeGen/GenOpsWriter.cs b/Tensorflow.CodeGen/GenOpsWriter.cs new file mode 100644 index 000000000..83ca6e0b9 --- /dev/null +++ 
b/Tensorflow.CodeGen/GenOpsWriter.cs @@ -0,0 +1,80 @@ +using Protobuf.Text; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace Tensorflow.CodeGen +{ + public class GenOpsWriter + { + private string _basePath; + private Dictionary _opMap; + private OpClassifier _opClassifier; + private FunctionGenerator _g = new(); + + public GenOpsWriter(string basePath, string pythonFilesDirectory, string opDefFilename) + { + _basePath = basePath; + + var opDefs = ReadAllOpDefs(opDefFilename); + _opMap = opDefs.Op.ToDictionary( + x => Tensorflow.CodeGen.Utils.ConvertToUnderscore(x.Name), x => x); + _opClassifier = new OpClassifier(pythonFilesDirectory); + } + + public void WriteAll() + { + foreach(var (target, set) in _opClassifier.OpSet) + { + StringBuilder sb = new StringBuilder(); + + // Write file header. + sb.AppendLine("/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/"); + sb.AppendLine(); + + // Add commonly used namespaces. + sb.AppendLine("using Tensorflow.Eager;"); + sb.AppendLine("using Tensorflow.Contexts;"); + sb.AppendLine("using static Tensorflow.Binding;"); + sb.AppendLine(); + + // Specify the namespace + sb.AppendLine("namespace Tensorflow;"); + sb.AppendLine(); + + // Write class name + sb.AppendLine($"internal static class {target}"); + sb.AppendLine("{"); + + foreach(var funcName in set) + { + if(_opMap.ContainsKey(funcName)) + { + var opDef = _opMap[funcName]; + _g.AppendFunction(opDef, sb); + } + else if (funcName.StartsWith("_")) + { + var opDef = _opMap[funcName.Substring(1)]; + _g.AppendFunction(opDef, sb); + } + } + + // Close class scope. + sb.AppendLine("}"); + + string fullFilePath = Path.Combine(_basePath, $"{target}.cs"); + File.WriteAllText(fullFilePath, sb.ToString()); + } + } + + private OpList ReadAllOpDefs(string path) + { + var text = File.ReadAllText(path); + var opDefs = OpList.Parser.ParseText(text); + return opDefs; + } + } +} diff --git a/Tensorflow.CodeGen/OpClassifier.cs b/Tensorflow.CodeGen/OpClassifier.cs new file mode 100644 index 000000000..2ea2f35ef --- /dev/null +++ b/Tensorflow.CodeGen/OpClassifier.cs @@ -0,0 +1,39 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using System.Text.RegularExpressions; + +namespace Tensorflow.CodeGen +{ + public class OpClassifier + { + private static readonly string _filenamePattern = @"^gen_[a-z]*_ops.py$"; + private static readonly string _pythonFunctionPattern = @"def\s+(\w+)\((?:\s*\w+\s*(?:=\s*[\S]*)*,\s*)*\s*\w+\s*=None\s*\):"; + private Dictionary> _opSet = new(); + public Dictionary> OpSet => _opSet; + public OpClassifier(string pythonFileFolder) + { + DirectoryInfo directory = new DirectoryInfo(pythonFileFolder); + + foreach (FileInfo file in directory.GetFiles()) + { + if (Regex.IsMatch(file.Name, _filenamePattern)) + { + string filenamePrefix = file.Name.Split('.')[0]; + string content = File.ReadAllText(file.FullName); + var matches = Regex.Matches(content, _pythonFunctionPattern); + foreach(Match match in matches) + { + var funcName = match.Groups[1].Value; + if (!funcName.EndsWith("_eager_fallback")) + { + _opSet.SetDefault(filenamePrefix, new HashSet()).Add(funcName); + } + } + } + } + } + } +} diff --git a/Tensorflow.CodeGen/Program.cs b/Tensorflow.CodeGen/Program.cs new file mode 100644 index 000000000..d46dcdcba --- /dev/null +++ b/Tensorflow.CodeGen/Program.cs @@ -0,0 +1,12 @@ +using OneOf.Types; +using 
Protobuf.Text; +using System.Diagnostics; +using System.Text; +using System.Xml.Linq; +using Tensorflow.CodeGen; + +GenOpsWriter writer = new(@"D:\development\tf.net\gen_ops", + @"D:\Apps\miniconda3\envs\tf2.11\Lib\site-packages\tensorflow\python\ops", + @"D:\development\tf.net\tensorflow-2.11.0\tensorflow\core\ops\ops.pbtxt"); + +writer.WriteAll(); diff --git a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj new file mode 100644 index 000000000..61273d013 --- /dev/null +++ b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj @@ -0,0 +1,18 @@ + + + + Exe + net6.0 + enable + enable + + + + + + + + + + + diff --git a/Tensorflow.CodeGen/Utils.cs b/Tensorflow.CodeGen/Utils.cs new file mode 100644 index 000000000..8cf21dee6 --- /dev/null +++ b/Tensorflow.CodeGen/Utils.cs @@ -0,0 +1,46 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reflection.Metadata.Ecma335; +using System.Text; +using System.Threading.Tasks; + +namespace Tensorflow.CodeGen +{ + public static class Utils + { + public static string ConvertToUnderscore(string input) + { + if (string.IsNullOrEmpty(input)) + { + return input; + } + + StringBuilder result = new StringBuilder(); + + int state = 0; // the previous char was not lowered. + for (int i = 0; i < input.Length; i++) + { + char current = input[i]; + + // 首字母不需要添加下划线 + if (i != 0 && char.IsUpper(current)) + { + if(state == 0) + { + result.Append("_"); + state = 1; + } + result.Append(char.ToLower(current)); + } + else + { + result.Append(char.ToLower(current)); + state = 0; + } + } + + return result.ToString(); + } + } +} From 6c651c97ba48b27e8cbf14804a9dc746a8bd830a Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Sun, 7 May 2023 22:49:57 +0800 Subject: [PATCH 004/182] fix: revise wrong behaviors of op code generator. --- Tensorflow.CodeGen/FunctionGenerator.cs | 284 +++++++++++++------ Tensorflow.CodeGen/GenOpsWriter.cs | 4 +- Tensorflow.CodeGen/OpClassifier.cs | 30 +- Tensorflow.CodeGen/Program.cs | 2 + Tensorflow.CodeGen/Tensorflow.CodeGen.csproj | 5 +- Tensorflow.CodeGen/Utils.cs | 15 +- 6 files changed, 242 insertions(+), 98 deletions(-) diff --git a/Tensorflow.CodeGen/FunctionGenerator.cs b/Tensorflow.CodeGen/FunctionGenerator.cs index d45203072..b3b695c58 100644 --- a/Tensorflow.CodeGen/FunctionGenerator.cs +++ b/Tensorflow.CodeGen/FunctionGenerator.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Diagnostics; using System.Linq; +using System.Linq.Expressions; using System.Reflection.Metadata.Ecma335; using System.Text; using System.Threading.Tasks; @@ -16,17 +17,17 @@ public void AppendFunction(OpDef op, StringBuilder sb) // TODO: add descriptions sb.Append("public static "); int outputArgsCount = op.OutputArg.Count; - if (outputArgsCount > 1) + if (outputArgsCount == 0) { - sb.Append("Tensor[] "); + sb.Append("Operation "); } - else if (outputArgsCount == 1) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) { sb.Append("Tensor "); } else { - sb.Append("Operation "); + sb.Append("Tensor[] "); } string funcName = Utils.ConvertToUnderscore(op.Name); var token = SyntaxFactory.ParseToken(funcName); @@ -42,6 +43,17 @@ public void AppendFunction(OpDef op, StringBuilder sb) // begin to write main body sb.AppendLine("var _ctx = tf.Context;"); + + var attrValueDic = GetAttrsDefaultValue(op, out var dynamicDefaultValues); + // deal with dynamic default values. 
+ foreach(var (name, expr) in dynamicDefaultValues) + { + sb.AppendLine($"if({name} is null)"); + sb.AppendLine("{"); + sb.AppendLine($"{name} = {expr};"); + sb.AppendLine("}"); + } + sb.AppendLine("if(_ctx.executing_eagerly()){"); if(HasRefArgs(op)) @@ -58,7 +70,7 @@ public void AppendFunction(OpDef op, StringBuilder sb) { sb.AppendLine("return null;"); } - else if (outputArgsCount == 1) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) { sb.AppendLine("return _fast_path_result[0];"); } @@ -82,6 +94,17 @@ public void AppendFunction(OpDef op, StringBuilder sb) sb.AppendLine("}"); // if + foreach(var (name, type, value) in attrValueDic.Where(x => x.Item2 == "string")) + { + if(value != "NOVALUE") + { + sb.AppendLine($"if({name} is null)"); + sb.AppendLine("{"); + sb.AppendLine($"{name} = {value};"); + sb.AppendLine("}"); + } + } + // begin to use op helper. AppendOpHelperCall(op, sb); sb.AppendLine("var _result = _op.outputs;"); @@ -126,7 +149,7 @@ public void AppendFunction(OpDef op, StringBuilder sb) { sb.AppendLine("return _op;"); } - else if (outputArgsCount == 1) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) { sb.AppendLine("return _result[0];"); } @@ -160,8 +183,8 @@ public void AppendArgs(OpDef op, StringBuilder sb) sb.Append($"Tensor {argName}, "); } } - var attrValueDic = GetAttrsDefaultValue(op); - foreach (var (key, (typeStr, value)) in attrValueDic) + var attrValueDic = GetAttrsDefaultValue(op, out var dynamicDefaultValues); + foreach (var (key, typeStr, value) in attrValueDic.Where(x => x.Item3 == "NOVALUE")) { var token = SyntaxFactory.ParseToken(key); string realKey = key; @@ -169,21 +192,25 @@ public void AppendArgs(OpDef op, StringBuilder sb) { realKey += "_"; } - if (value != "NOVALUE") - { - sb.Append($"{typeStr} {realKey} = {value}, "); - } - else + sb.Append($"{typeStr} {realKey}, "); + } + foreach (var (key, typeStr, value) in attrValueDic.Where(x => x.Item3 != "NOVALUE")) + { + var token = SyntaxFactory.ParseToken(key); + string realKey = key; + if (token.IsKeyword()) { - sb.Append($"{typeStr} {realKey}, "); + realKey += "_"; } + sb.Append($"{typeStr} {realKey} = {value}, "); } sb.Append($"string? 
name = null"); } public void AppendFastPathExecute(OpDef op, StringBuilder sb) { - sb.Append($"var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, \"{op.Name}\", name, "); + sb.Append($"var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, \"{op.Name}\", name)"); + sb.Append("{ args = new object[]{ "); foreach (var arg in op.InputArg) { string attrArgName = arg.Name; @@ -193,16 +220,23 @@ public void AppendFastPathExecute(OpDef op, StringBuilder sb) } sb.Append($"{attrArgName}, "); } - var attrValueDic = GetAttrsDefaultValue(op); - foreach (var (key, _) in attrValueDic) + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') { - sb.Append($"\"{key}\", {key}, "); + sb.Remove(sb.Length - 2, 2); + } + + sb.Append("}, attrs = new Dictionary(){ "); + var attrValueDic = GetAttrsDefaultValue(op, out var _); + foreach (var (key, _, _) in attrValueDic) + { + sb.Append($"[\"{key}\"] = {key}, "); } + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') { sb.Remove(sb.Length - 2, 2); } - sb.Append("));\n"); + sb.Append("}});\n"); } public void AppendEagerFallbackCall(OpDef op, StringBuilder sb) @@ -218,8 +252,8 @@ public void AppendEagerFallbackCall(OpDef op, StringBuilder sb) } sb.Append($"{inputArgRealName}, "); } - var attrValueDic = GetAttrsDefaultValue(op); - foreach (var (key, _) in attrValueDic) + var attrValueDic = GetAttrsDefaultValue(op, out var _); + foreach (var (key, _, _) in attrValueDic) { string keyRealName = key; if (SyntaxFactory.ParseToken(keyRealName).IsKeyword()) @@ -233,11 +267,19 @@ public void AppendEagerFallbackCall(OpDef op, StringBuilder sb) public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) { - sb.Append("public static Tensor"); + sb.Append("public static "); int outputArgsCount = op.OutputArg.Count; - if (outputArgsCount > 1) + if (outputArgsCount == 0) + { + sb.Append("Operation "); + } + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) + { + sb.Append("Tensor "); + } + else { - sb.Append("[]"); + sb.Append("Tensor[] "); } string opName = op.Name; string funcName = Utils.ConvertToUnderscore(op.Name); @@ -254,24 +296,47 @@ public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) return; } - sb.Append("Tensor[] _inputs_flat = new Tensor[]{"); - foreach (var arg in op.InputArg) + if(op.InputArg.Any(x => !string.IsNullOrEmpty(x.NumberAttr))) { - string realArgName = arg.Name; - if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + sb.AppendLine("List _inputs_flat_list = new();"); + foreach (var arg in op.InputArg) { - realArgName = $"{realArgName}_"; + string realArgName = arg.Name; + if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + { + realArgName = $"{realArgName}_"; + } + if (string.IsNullOrEmpty(arg.NumberAttr)) + { + sb.AppendLine($"_inputs_flat_list.Add({realArgName});"); + } + else + { + sb.AppendLine($"_inputs_flat_list.AddRange({realArgName});"); + } } - sb.Append($"{realArgName}, "); + sb.AppendLine($"var _inputs_flat = _inputs_flat_list.ToArray();"); } - if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') + else { - sb.Remove(sb.Length - 2, 2); + sb.Append("Tensor[] _inputs_flat = new Tensor[]{"); + foreach (var arg in op.InputArg) + { + string realArgName = arg.Name; + if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + { + realArgName = $"{realArgName}_"; + } + sb.Append($"{realArgName}, "); + } + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') + { + sb.Remove(sb.Length - 2, 2); + } + 
sb.Append("};\n"); } - sb.Append("};\n"); sb.Append("object[] _attrs = new object[]{"); - var attrValueDic = GetAttrsDefaultValue(op); foreach (var attr in op.Attr) { if (attr.Type == "type") @@ -293,27 +358,15 @@ public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) } if (!found) { - if (attr.Name.StartsWith("T") && attr.Name.Length > 1) - { - string paramName = attr.Name.Substring(1); - if (SyntaxFactory.ParseToken(paramName).IsKeyword()) - { - paramName = $"{paramName}_"; - } - sb.Append($"\"{attr.Name}\", {paramName}.dtype, "); - } - else + string attrRealName = attr.Name; + if (SyntaxFactory.ParseToken(attrRealName).IsKeyword()) { - string attrRealName = attr.Name; - if (SyntaxFactory.ParseToken(attrRealName).IsKeyword()) - { - attrRealName = $"{attrRealName}_"; - } - sb.Append($"\"{attr.Name}\", {attrRealName}, "); + attrRealName = $"{attrRealName}_"; } + sb.Append($"\"{attr.Name}\", {attrRealName}, "); } } - else if(attr.Type == "int" && (op.InputArg.Any(x => x.NumberAttr == attr.Name) || op.OutputArg.Any(x => x.NumberAttr == attr.Name))) + else if(attr.Type == "int" && op.InputArg.Any(x => x.NumberAttr == attr.Name)) { bool found = false; foreach (var arg in op.InputArg) @@ -355,7 +408,7 @@ public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) { sb.AppendLine("return null;"); } - else if (outputArgsCount == 1) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) { sb.AppendLine("return _result[0];"); } @@ -386,8 +439,8 @@ public void AppendFallBackFunctionArgs(OpDef op, StringBuilder sb) sb.Append($"Tensor {argName}, "); } } - var attrValueDic = GetAttrsDefaultValue(op); - foreach (var (key, (typeStr, _)) in attrValueDic) + var attrValueDic = GetAttrsDefaultValue(op, out var _); + foreach (var (key, typeStr, _) in attrValueDic) { var token = SyntaxFactory.ParseToken(key); string realKey = key; @@ -412,18 +465,19 @@ public void AppendOpHelperCall(OpDef op, StringBuilder sb) } sb.AppendLine($"keywords[\"{arg.Name}\"] = {realArgName};"); } - var attrValueDic = GetAttrsDefaultValue(op); - foreach (var (key, _) in attrValueDic) + var attrValueDic = GetAttrsDefaultValue(op, out var _); + foreach (var (key, _, _) in attrValueDic) { - sb.Append($"keywords[\"{key}\"] = {key};"); + sb.AppendLine($"keywords[\"{key}\"] = {key};"); } sb.AppendLine($"var _op = tf.OpDefLib._apply_op_helper(\"{op.Name}\", name, keywords);"); } - // key, (type string, default value) - public Dictionary GetAttrsDefaultValue(OpDef op) + // name, type string, default value + public List<(string, string, string)> GetAttrsDefaultValue(OpDef op, out Dictionary dynamicDefaultValues) { - Dictionary dic = new(); + dynamicDefaultValues = new(); + List<(string, string, string)> res = new(); foreach (var attr in op.Attr) { if (attr.Type == "type") @@ -435,111 +489,177 @@ public void AppendOpHelperCall(OpDef op, StringBuilder sb) { string name = Enum.GetName(typeof(TF_DataType), attr.DefaultValue.Type.as_tf_dtype()); string enumPath = typeof(TF_DataType).Name + "." 
+ name; - dic[attr.Name] = ("TF_DataType", enumPath); + res.Add((attr.Name, "TF_DataType", enumPath)); } else { - dic[attr.Name] = ("TF_DataType", "NOVALUE"); + res.Add((attr.Name, "TF_DataType", "NOVALUE")); } } } else if (attr.Type == "int") { - if(op.InputArg.Any(x => x.NumberAttr == attr.Name) || op.OutputArg.Any(x => x.NumberAttr == attr.Name)) + if(op.InputArg.Any(x => x.NumberAttr == attr.Name)) { continue; } if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.I) { - dic[attr.Name] = ("int", attr.DefaultValue.I.ToString()); + res.Add((attr.Name, "int", attr.DefaultValue.I.ToString())); } else { - dic[attr.Name] = ("int", "0"); + res.Add((attr.Name, "int", "0")); } } else if (attr.Type == "float") { if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.F) { - dic[attr.Name] = ("float", attr.DefaultValue.F.ToString() + "f"); + res.Add((attr.Name, "float", attr.DefaultValue.F.ToString() + "f")); } else { - dic[attr.Name] = ("float", "NOVALUE"); + res.Add((attr.Name, "float", "NOVALUE")); } } else if (attr.Type == "string") { if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) { - dic[attr.Name] = ("string", $"\"{attr.DefaultValue.S.ToStringUtf8()}\""); + res.Add((attr.Name, "string", $"\"{attr.DefaultValue.S.ToStringUtf8()}\"")); } else { - dic[attr.Name] = ("string", "NOVALUE"); + res.Add((attr.Name, "string", "NOVALUE")); } } else if (attr.Type == "bool") { if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.B) { - dic[attr.Name] = ("bool", attr.DefaultValue.B.ToString().ToLower()); + res.Add((attr.Name, "bool", attr.DefaultValue.B.ToString().ToLower())); } else { - dic[attr.Name] = ("bool", "NOVALUE"); + res.Add((attr.Name, "bool", "NOVALUE")); } } else if (attr.Type == "shape") { if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Shape) { - dic[attr.Name] = ("Shape", $"null"); + if (attr.DefaultValue.Shape.UnknownRank) + { + res.Add((attr.Name, "Shape", $"null")); + } + else + { + Shape shape = new Shape(attr.DefaultValue.Shape); + string expression = $"new Shape({string.Join(", ", shape.dims)})"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "Shape", $"null")); + } } else { - dic[attr.Name] = ("Shape", "NOVALUE"); + res.Add((attr.Name, "Shape", "NOVALUE")); } } else if (attr.Type == "list(type)") { - dic[attr.Name] = ("TF_DataType[]", "NOVALUE"); + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.Type) + { + values.Add(value.as_tf_dtype()); + } + string expression = "new TF_DataType[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "TF_DataType[]", $"null")); + } + else + { + res.Add((attr.Name, "TF_DataType[]", "NOVALUE")); + } } else if (attr.Type == "list(shape)") { - dic[attr.Name] = ("Shape[]", "NOVALUE"); + res.Add((attr.Name, "Shape[]", "NOVALUE")); } else if (attr.Type == "list(string)") { - dic[attr.Name] = ("string[]", "NOVALUE"); + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.S) + { + values.Add(value.ToStringUtf8()); + } + string expression = "new string[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = 
expression; + res.Add((attr.Name, "string[]", $"null")); + } + else + { + res.Add((attr.Name, "string[]", "NOVALUE")); + } } else if (attr.Type == "list(int)") { - dic[attr.Name] = ("int[]", "NOVALUE"); + if(attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.List) + { + List values = new(); + foreach(var value in attr.DefaultValue.List.I) + { + values.Add((int)value); + } + string expression = "new int[]{" + $"{string.Join(", ", values)}" +"}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "int[]", $"null")); + } + else + { + res.Add((attr.Name, "int[]", "NOVALUE")); + } } else if (attr.Type == "list(float)") { - dic[attr.Name] = ("float[]", "NOVALUE"); + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.List) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.F) + { + values.Add(value); + } + string expression = "new float[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "float[]", $"null")); + } + else + { + res.Add((attr.Name, "float[]", "NOVALUE")); + } } else if (attr.Type == "func") { - dic[attr.Name] = ("Func", "NOVALUE"); + res.Add((attr.Name, "Func", "NOVALUE")); } else if (attr.Type == "list(func)") { - dic[attr.Name] = ("Func[]", "NOVALUE"); + res.Add((attr.Name, "Func[]", "NOVALUE")); } else if (attr.Type == "tensor") { - dic[attr.Name] = ("TensorProto", "NOVALUE"); + res.Add((attr.Name, "TensorProto", "NOVALUE")); } else { throw new NotImplementedException(); } } - return dic; + return res; } private static bool HasRefArgs(OpDef op) diff --git a/Tensorflow.CodeGen/GenOpsWriter.cs b/Tensorflow.CodeGen/GenOpsWriter.cs index 83ca6e0b9..2cd7bca50 100644 --- a/Tensorflow.CodeGen/GenOpsWriter.cs +++ b/Tensorflow.CodeGen/GenOpsWriter.cs @@ -21,7 +21,7 @@ public GenOpsWriter(string basePath, string pythonFilesDirectory, string opDefFi var opDefs = ReadAllOpDefs(opDefFilename); _opMap = opDefs.Op.ToDictionary( x => Tensorflow.CodeGen.Utils.ConvertToUnderscore(x.Name), x => x); - _opClassifier = new OpClassifier(pythonFilesDirectory); + _opClassifier = new OpClassifier(pythonFilesDirectory, opDefs.Op.Select(x => Utils.ConvertToUnderscore(x.Name))); } public void WriteAll() @@ -45,7 +45,7 @@ public void WriteAll() sb.AppendLine(); // Write class name - sb.AppendLine($"internal static class {target}"); + sb.AppendLine($"public static class {target}"); sb.AppendLine("{"); foreach(var funcName in set) diff --git a/Tensorflow.CodeGen/OpClassifier.cs b/Tensorflow.CodeGen/OpClassifier.cs index 2ea2f35ef..eaad3fec8 100644 --- a/Tensorflow.CodeGen/OpClassifier.cs +++ b/Tensorflow.CodeGen/OpClassifier.cs @@ -10,27 +10,39 @@ namespace Tensorflow.CodeGen public class OpClassifier { private static readonly string _filenamePattern = @"^gen_[a-z]*_ops.py$"; - private static readonly string _pythonFunctionPattern = @"def\s+(\w+)\((?:\s*\w+\s*(?:=\s*[\S]*)*,\s*)*\s*\w+\s*=None\s*\):"; + private static readonly string _pythonFunctionPattern = @"def\s+(\w+\d*\w*)\((?:\s*\w+\s*(?:=\s*[\S]*)*,\s*)*\s*name=None\):"; private Dictionary> _opSet = new(); public Dictionary> OpSet => _opSet; - public OpClassifier(string pythonFileFolder) + public OpClassifier(string pythonFileFolder, IEnumerable funcNames) { DirectoryInfo directory = new DirectoryInfo(pythonFileFolder); + Dictionary fileContentMap = new(); foreach (FileInfo file in directory.GetFiles()) { if (Regex.IsMatch(file.Name, _filenamePattern)) { + 
Console.WriteLine(file.Name); string filenamePrefix = file.Name.Split('.')[0]; string content = File.ReadAllText(file.FullName); - var matches = Regex.Matches(content, _pythonFunctionPattern); - foreach(Match match in matches) + fileContentMap[filenamePrefix] = content; + } + } + + foreach(var funcName in funcNames) + { + Console.WriteLine(funcName); + string funcPattern = @$"^def\s+{funcName}\("; + string fallbackFuncPattern = @$"^def\s+{funcName}_eager_fallback\("; + foreach (var (target, content) in fileContentMap) + { + if(content.Contains($"def {funcName}") && content.Contains($"def {funcName}_eager_fallback")) + { + _opSet.SetDefault(target, new HashSet()).Add(funcName); + } + else if (content.Contains($"def _{funcName}") && content.Contains($"def _{funcName}_eager_fallback")) { - var funcName = match.Groups[1].Value; - if (!funcName.EndsWith("_eager_fallback")) - { - _opSet.SetDefault(filenamePrefix, new HashSet()).Add(funcName); - } + _opSet.SetDefault(target, new HashSet()).Add(funcName); } } } diff --git a/Tensorflow.CodeGen/Program.cs b/Tensorflow.CodeGen/Program.cs index d46dcdcba..a26031cb3 100644 --- a/Tensorflow.CodeGen/Program.cs +++ b/Tensorflow.CodeGen/Program.cs @@ -5,6 +5,8 @@ using System.Xml.Linq; using Tensorflow.CodeGen; +//Console.WriteLine(Utils.ConvertToUnderscore("LRN")); + GenOpsWriter writer = new(@"D:\development\tf.net\gen_ops", @"D:\Apps\miniconda3\envs\tf2.11\Lib\site-packages\tensorflow\python\ops", @"D:\development\tf.net\tensorflow-2.11.0\tensorflow\core\ops\ops.pbtxt"); diff --git a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj index 61273d013..a052eb692 100644 --- a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj +++ b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj @@ -1,4 +1,4 @@ - + Exe @@ -9,10 +9,11 @@ + - + diff --git a/Tensorflow.CodeGen/Utils.cs b/Tensorflow.CodeGen/Utils.cs index 8cf21dee6..608222e01 100644 --- a/Tensorflow.CodeGen/Utils.cs +++ b/Tensorflow.CodeGen/Utils.cs @@ -18,15 +18,24 @@ public static string ConvertToUnderscore(string input) StringBuilder result = new StringBuilder(); - int state = 0; // the previous char was not lowered. + int state = 1; // the previous char was not lowered. for (int i = 0; i < input.Length; i++) { char current = input[i]; // 首字母不需要添加下划线 - if (i != 0 && char.IsUpper(current)) + if (char.IsUpper(current)) { - if(state == 0) + if(i > 0) + { + char pre = input[i - 1]; + if (char.IsDigit(pre)) + { + result.Append(char.ToLower(current)); + continue; + } + } + if (state == 0) { result.Append("_"); state = 1; From c1b67318439395a09ab77a6c94cd822cfd350f13 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Mon, 8 May 2023 01:57:18 +0800 Subject: [PATCH 005/182] feat: description generator of op code. 
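The DescriptionGenerator parses the api_def_*.pbtxt files and prepends an XML
doc comment to each generated wrapper: a <summary> taken from the op summary,
one <param> per input arg and attr, and a <returns> built from the output arg
descriptions joined with " && ".

A minimal sketch of the kind of header that gets emitted, assuming a
placeholder op some_op with one input arg and one attr (the names some_op,
input and keep_dims are illustrative only, not output for any real op):

    /// <summary>
    /// Op summary taken from the corresponding api_def file.
    /// </summary>
    /// <param name="input">Description of the input arg, if the api_def provides one.</param>
    /// <param name="keep_dims">Description of the attr, if the api_def provides one.</param>
    /// <returns>Output arg descriptions, joined with " && ".</returns>
    public static Tensor some_op(Tensor input, bool keep_dims = false, string? name = null)
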
--- Tensorflow.CodeGen/DescriptionGenerator.cs | 263 +++++++++++++++++++ Tensorflow.CodeGen/FunctionGenerator.cs | 201 +------------- Tensorflow.CodeGen/GenOpsWriter.cs | 26 +- Tensorflow.CodeGen/Program.cs | 3 +- Tensorflow.CodeGen/Tensorflow.CodeGen.csproj | 2 +- Tensorflow.CodeGen/Utils.cs | 199 +++++++++++++- 6 files changed, 482 insertions(+), 212 deletions(-) create mode 100644 Tensorflow.CodeGen/DescriptionGenerator.cs diff --git a/Tensorflow.CodeGen/DescriptionGenerator.cs b/Tensorflow.CodeGen/DescriptionGenerator.cs new file mode 100644 index 000000000..0437370a1 --- /dev/null +++ b/Tensorflow.CodeGen/DescriptionGenerator.cs @@ -0,0 +1,263 @@ +using Microsoft.CodeAnalysis.CSharp; +using Protobuf.Text; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reflection.Metadata.Ecma335; +using System.Text; +using System.Text.RegularExpressions; +using System.Threading.Tasks; + +namespace Tensorflow.CodeGen +{ + public class DescriptionGenerator + { + private static readonly string replaceStrInner = "~~%~~"; + private static readonly string replaceStrInnerQuotationMarks = "^%^"; + Dictionary> _opDescriptions = new Dictionary>(); + Dictionary _opDescriptionDefs = new Dictionary(); + public DescriptionGenerator(string apiDefDirectory) + { + DirectoryInfo directory = new DirectoryInfo(apiDefDirectory); + + int errors = 0; + foreach (FileInfo file in directory.GetFiles()) + { + string target = file.Name.Split('.')[0].Split('_').Last(); + OpDef op = null; + try + { + op = ReadOpDefs(file.FullName).Op[0]; + } + catch + { + errors++; + continue; + } + _opDescriptionDefs[target] = op; + _opDescriptions[target] = new Dictionary(); + foreach (var arg in op.InputArg) + { + string argName = arg.Name; + var token = SyntaxFactory.ParseToken(argName); + if (token.IsKeyword()) + { + argName = $"{argName}_"; + } + _opDescriptions[target][argName] = arg.Description ?? ""; + } + foreach (var arg in op.Attr) + { + var token = SyntaxFactory.ParseToken(arg.Name); + string realKey = arg.Name; + if (token.IsKeyword()) + { + realKey += "_"; + } + _opDescriptions[target][realKey] = arg.Description ?? ""; + } + _opDescriptions[target]["SUMMARY"] = op.Summary ?? ""; + _opDescriptions[target]["DESC"] = op.Description ?? ""; + } + Console.WriteLine($"Warning: {errors} description files cannot be analyzed! 
Please revise it if " + + $"the failed files number is large, or ignore it."); + } + + /// + /// + /// + /// + /// + public void AppendDescription(OpDef fullOp, StringBuilder sb) + { + var opName = fullOp.Name; + if(_opDescriptions.TryGetValue(opName, out var op)) + { + var def = _opDescriptionDefs[opName]; + sb.AppendLine("/// "); + sb.AppendLine($"/// {op["SUMMARY"]}"); + sb.AppendLine("/// "); + + string totalDesc = op["DESC"]; + if (!string.IsNullOrEmpty(totalDesc)) + { + totalDesc = totalDesc.Replace(replaceStrInnerQuotationMarks, "\""); + sb.AppendLine("/// "); + string[] lines = totalDesc.Split(replaceStrInner); + foreach (var line in lines) + { + sb.AppendLine($"/// {line}"); + } + sb.AppendLine("/// "); + } + + var argNames = GetInputArgNames(fullOp); + foreach (var argName in argNames) + { + if(op.TryGetValue(argName, out var desc)) + { + desc = desc.Replace(replaceStrInnerQuotationMarks, "\""); + string[] lines = desc.Split(replaceStrInner); + sb.AppendLine($"/// "); + foreach (var line in lines) + { + sb.AppendLine($"/// {line}"); + } + sb.AppendLine("/// "); + } + else + { + sb.AppendLine($"/// "); + } + } + + List returnValueDescs = new(); + foreach (var arg in def.OutputArg) + { + if (!string.IsNullOrEmpty(arg.Description)) + { + returnValueDescs.Add($"{arg.Name}: {arg.Description}"); + } + } + string returnValueDesc = ""; + if (returnValueDescs.Count > 0) + { + returnValueDesc = string.Join(" && ", returnValueDescs); + } + sb.AppendLine($"/// {returnValueDesc}"); + } + else + { + sb.AppendLine("/// "); + sb.AppendLine($"///"); + sb.AppendLine("/// "); + + var argNames = GetInputArgNames(fullOp); + foreach (var argName in argNames) + { + sb.AppendLine($"/// "); + } + + sb.AppendLine($"/// "); + } + } + + /// + /// + /// + /// + /// + /// + /// + public List GetInputArgNames(OpDef op) + { + List names = new(); + foreach (var arg in op.InputArg) + { + string argName = arg.Name; + var token = SyntaxFactory.ParseToken(argName); + if (token.IsKeyword()) + { + argName = $"{argName}_"; + } + names.Add(argName); + } + var attrValueDic = Utils.GetAttrsDefaultValue(op, out var dynamicDefaultValues); + foreach (var (key, typeStr, value) in attrValueDic) + { + var token = SyntaxFactory.ParseToken(key); + string realKey = key; + if (token.IsKeyword()) + { + realKey += "_"; + } + names.Add(realKey); + } + return names; + } + + private static OpList ReadOpDefs(string path) + { + var text = File.ReadAllText(path); + text = RemoveLintTags(text); + text = PreProcessText(text); + + string pattern = @"< { + string matchedText = match.Value; + string innerText = match.Groups[1].Value; + innerText = innerText.Replace("\"", replaceStrInnerQuotationMarks) + .Replace("\r\n", replaceStrInner).Replace("\n", replaceStrInner); // 替换内部换行符 + return replaceStrPrefix + innerText + replaceStrSuffix; // 替换首尾 + }, RegexOptions.Multiline); + + var opDefs = new TextParser(TextParser.Settings.Default.WithIgnoreUnknownFields(true)).Parse(replacedText); + return opDefs; + } + + static string PreProcessText(string input) + { + int depth = 0; + int endBlockDepth = -1; + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < input.Length; i++) + { + char c = input[i]; + if (c == '{') + { + depth++; + sb.Append(c); + } + else if (c == '}') + { + if (depth == endBlockDepth) + { + sb.Append("END\n"); + endBlockDepth = -1; + } + sb.Append(c); + depth--; + } + else if (c == '<' && i + 5 < input.Length && input.Substring(i, 5) == "< x.Item3 == "NOVALUE")) { var token = SyntaxFactory.ParseToken(key); @@ -226,7 +226,7 
@@ public void AppendFastPathExecute(OpDef op, StringBuilder sb) } sb.Append("}, attrs = new Dictionary(){ "); - var attrValueDic = GetAttrsDefaultValue(op, out var _); + var attrValueDic = Utils.GetAttrsDefaultValue(op, out var _); foreach (var (key, _, _) in attrValueDic) { sb.Append($"[\"{key}\"] = {key}, "); @@ -252,7 +252,7 @@ public void AppendEagerFallbackCall(OpDef op, StringBuilder sb) } sb.Append($"{inputArgRealName}, "); } - var attrValueDic = GetAttrsDefaultValue(op, out var _); + var attrValueDic = Utils.GetAttrsDefaultValue(op, out var _); foreach (var (key, _, _) in attrValueDic) { string keyRealName = key; @@ -439,7 +439,7 @@ public void AppendFallBackFunctionArgs(OpDef op, StringBuilder sb) sb.Append($"Tensor {argName}, "); } } - var attrValueDic = GetAttrsDefaultValue(op, out var _); + var attrValueDic = Utils.GetAttrsDefaultValue(op, out var _); foreach (var (key, typeStr, _) in attrValueDic) { var token = SyntaxFactory.ParseToken(key); @@ -465,7 +465,7 @@ public void AppendOpHelperCall(OpDef op, StringBuilder sb) } sb.AppendLine($"keywords[\"{arg.Name}\"] = {realArgName};"); } - var attrValueDic = GetAttrsDefaultValue(op, out var _); + var attrValueDic = Utils.GetAttrsDefaultValue(op, out var _); foreach (var (key, _, _) in attrValueDic) { sb.AppendLine($"keywords[\"{key}\"] = {key};"); @@ -473,195 +473,6 @@ public void AppendOpHelperCall(OpDef op, StringBuilder sb) sb.AppendLine($"var _op = tf.OpDefLib._apply_op_helper(\"{op.Name}\", name, keywords);"); } - // name, type string, default value - public List<(string, string, string)> GetAttrsDefaultValue(OpDef op, out Dictionary dynamicDefaultValues) - { - dynamicDefaultValues = new(); - List<(string, string, string)> res = new(); - foreach (var attr in op.Attr) - { - if (attr.Type == "type") - { - bool found = op.InputArg.Any(x => x.TypeAttr == attr.Name); - if (!found) - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) - { - string name = Enum.GetName(typeof(TF_DataType), attr.DefaultValue.Type.as_tf_dtype()); - string enumPath = typeof(TF_DataType).Name + "." 
+ name; - res.Add((attr.Name, "TF_DataType", enumPath)); - } - else - { - res.Add((attr.Name, "TF_DataType", "NOVALUE")); - } - } - } - else if (attr.Type == "int") - { - if(op.InputArg.Any(x => x.NumberAttr == attr.Name)) - { - continue; - } - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.I) - { - res.Add((attr.Name, "int", attr.DefaultValue.I.ToString())); - } - else - { - res.Add((attr.Name, "int", "0")); - } - } - else if (attr.Type == "float") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.F) - { - res.Add((attr.Name, "float", attr.DefaultValue.F.ToString() + "f")); - } - else - { - res.Add((attr.Name, "float", "NOVALUE")); - } - } - else if (attr.Type == "string") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) - { - res.Add((attr.Name, "string", $"\"{attr.DefaultValue.S.ToStringUtf8()}\"")); - } - else - { - res.Add((attr.Name, "string", "NOVALUE")); - } - } - else if (attr.Type == "bool") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.B) - { - res.Add((attr.Name, "bool", attr.DefaultValue.B.ToString().ToLower())); - } - else - { - res.Add((attr.Name, "bool", "NOVALUE")); - } - } - else if (attr.Type == "shape") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Shape) - { - if (attr.DefaultValue.Shape.UnknownRank) - { - res.Add((attr.Name, "Shape", $"null")); - } - else - { - Shape shape = new Shape(attr.DefaultValue.Shape); - string expression = $"new Shape({string.Join(", ", shape.dims)})"; - dynamicDefaultValues[attr.Name] = expression; - res.Add((attr.Name, "Shape", $"null")); - } - } - else - { - res.Add((attr.Name, "Shape", "NOVALUE")); - } - } - else if (attr.Type == "list(type)") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) - { - List values = new(); - foreach (var value in attr.DefaultValue.List.Type) - { - values.Add(value.as_tf_dtype()); - } - string expression = "new TF_DataType[]{" + $"{string.Join(", ", values)}" + "}"; - dynamicDefaultValues[attr.Name] = expression; - res.Add((attr.Name, "TF_DataType[]", $"null")); - } - else - { - res.Add((attr.Name, "TF_DataType[]", "NOVALUE")); - } - } - else if (attr.Type == "list(shape)") - { - res.Add((attr.Name, "Shape[]", "NOVALUE")); - } - else if (attr.Type == "list(string)") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) - { - List values = new(); - foreach (var value in attr.DefaultValue.List.S) - { - values.Add(value.ToStringUtf8()); - } - string expression = "new string[]{" + $"{string.Join(", ", values)}" + "}"; - dynamicDefaultValues[attr.Name] = expression; - res.Add((attr.Name, "string[]", $"null")); - } - else - { - res.Add((attr.Name, "string[]", "NOVALUE")); - } - } - else if (attr.Type == "list(int)") - { - if(attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.List) - { - List values = new(); - foreach(var value in attr.DefaultValue.List.I) - { - values.Add((int)value); - } - string expression = "new int[]{" + $"{string.Join(", ", values)}" +"}"; - dynamicDefaultValues[attr.Name] = expression; - res.Add((attr.Name, "int[]", $"null")); - } - else - { - res.Add((attr.Name, "int[]", "NOVALUE")); - } - } - else if (attr.Type == "list(float)") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == 
AttrValue.ValueOneofCase.List) - { - List values = new(); - foreach (var value in attr.DefaultValue.List.F) - { - values.Add(value); - } - string expression = "new float[]{" + $"{string.Join(", ", values)}" + "}"; - dynamicDefaultValues[attr.Name] = expression; - res.Add((attr.Name, "float[]", $"null")); - } - else - { - res.Add((attr.Name, "float[]", "NOVALUE")); - } - } - else if (attr.Type == "func") - { - res.Add((attr.Name, "Func", "NOVALUE")); - } - else if (attr.Type == "list(func)") - { - res.Add((attr.Name, "Func[]", "NOVALUE")); - } - else if (attr.Type == "tensor") - { - res.Add((attr.Name, "TensorProto", "NOVALUE")); - } - else - { - throw new NotImplementedException(); - } - } - return res; - } - private static bool HasRefArgs(OpDef op) { return op.InputArg.Any(x => x.IsRef); diff --git a/Tensorflow.CodeGen/GenOpsWriter.cs b/Tensorflow.CodeGen/GenOpsWriter.cs index 2cd7bca50..7601acdbb 100644 --- a/Tensorflow.CodeGen/GenOpsWriter.cs +++ b/Tensorflow.CodeGen/GenOpsWriter.cs @@ -12,16 +12,18 @@ public class GenOpsWriter private string _basePath; private Dictionary _opMap; private OpClassifier _opClassifier; - private FunctionGenerator _g = new(); + private FunctionGenerator _fg = new(); + private DescriptionGenerator _dg; - public GenOpsWriter(string basePath, string pythonFilesDirectory, string opDefFilename) + public GenOpsWriter(string basePath, string pythonFilesDirectory, string apiDefFilesDirectory, string opDefFilename) { _basePath = basePath; - var opDefs = ReadAllOpDefs(opDefFilename); + var opDefs = Utils.ReadAllOpDefs(opDefFilename); _opMap = opDefs.Op.ToDictionary( - x => Tensorflow.CodeGen.Utils.ConvertToUnderscore(x.Name), x => x); + x => Utils.ConvertToUnderscore(x.Name), x => x); _opClassifier = new OpClassifier(pythonFilesDirectory, opDefs.Op.Select(x => Utils.ConvertToUnderscore(x.Name))); + _dg = new DescriptionGenerator(apiDefFilesDirectory); } public void WriteAll() @@ -53,12 +55,17 @@ public void WriteAll() if(_opMap.ContainsKey(funcName)) { var opDef = _opMap[funcName]; - _g.AppendFunction(opDef, sb); + + // write the descriptions. + _dg.AppendDescription(opDef, sb); + + // write the function body. 
+ _fg.AppendFunction(opDef, sb); } else if (funcName.StartsWith("_")) { var opDef = _opMap[funcName.Substring(1)]; - _g.AppendFunction(opDef, sb); + _fg.AppendFunction(opDef, sb); } } @@ -69,12 +76,5 @@ public void WriteAll() File.WriteAllText(fullFilePath, sb.ToString()); } } - - private OpList ReadAllOpDefs(string path) - { - var text = File.ReadAllText(path); - var opDefs = OpList.Parser.ParseText(text); - return opDefs; - } } } diff --git a/Tensorflow.CodeGen/Program.cs b/Tensorflow.CodeGen/Program.cs index a26031cb3..f9d44ce83 100644 --- a/Tensorflow.CodeGen/Program.cs +++ b/Tensorflow.CodeGen/Program.cs @@ -5,10 +5,9 @@ using System.Xml.Linq; using Tensorflow.CodeGen; -//Console.WriteLine(Utils.ConvertToUnderscore("LRN")); - GenOpsWriter writer = new(@"D:\development\tf.net\gen_ops", @"D:\Apps\miniconda3\envs\tf2.11\Lib\site-packages\tensorflow\python\ops", + @"D:\development\tf.net\tensorflow-2.11.0\tensorflow\core\api_def\base_api", @"D:\development\tf.net\tensorflow-2.11.0\tensorflow\core\ops\ops.pbtxt"); writer.WriteAll(); diff --git a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj index a052eb692..865db126b 100644 --- a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj +++ b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj @@ -9,11 +9,11 @@ - + diff --git a/Tensorflow.CodeGen/Utils.cs b/Tensorflow.CodeGen/Utils.cs index 608222e01..d3f30d9f2 100644 --- a/Tensorflow.CodeGen/Utils.cs +++ b/Tensorflow.CodeGen/Utils.cs @@ -1,4 +1,5 @@ -using System; +using Protobuf.Text; +using System; using System.Collections.Generic; using System.Linq; using System.Reflection.Metadata.Ecma335; @@ -51,5 +52,201 @@ public static string ConvertToUnderscore(string input) return result.ToString(); } + + public static OpList ReadAllOpDefs(string path) + { + var text = File.ReadAllText(path); + var opDefs = OpList.Parser.ParseText(text); + return opDefs; + } + + // name, type string, default value + public static List<(string, string, string)> GetAttrsDefaultValue(OpDef op, out Dictionary dynamicDefaultValues) + { + dynamicDefaultValues = new(); + List<(string, string, string)> res = new(); + foreach (var attr in op.Attr) + { + if (attr.Type == "type") + { + bool found = op.InputArg.Any(x => x.TypeAttr == attr.Name); + if (!found) + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) + { + string name = Enum.GetName(typeof(TF_DataType), attr.DefaultValue.Type.as_tf_dtype()); + string enumPath = typeof(TF_DataType).Name + "." 
+ name; + res.Add((attr.Name, "TF_DataType", enumPath)); + } + else + { + res.Add((attr.Name, "TF_DataType", "NOVALUE")); + } + } + } + else if (attr.Type == "int") + { + if (op.InputArg.Any(x => x.NumberAttr == attr.Name)) + { + continue; + } + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.I) + { + res.Add((attr.Name, "int", attr.DefaultValue.I.ToString())); + } + else + { + res.Add((attr.Name, "int", "0")); + } + } + else if (attr.Type == "float") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.F) + { + res.Add((attr.Name, "float", attr.DefaultValue.F.ToString() + "f")); + } + else + { + res.Add((attr.Name, "float", "NOVALUE")); + } + } + else if (attr.Type == "string") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) + { + res.Add((attr.Name, "string", $"\"{attr.DefaultValue.S.ToStringUtf8()}\"")); + } + else + { + res.Add((attr.Name, "string", "NOVALUE")); + } + } + else if (attr.Type == "bool") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.B) + { + res.Add((attr.Name, "bool", attr.DefaultValue.B.ToString().ToLower())); + } + else + { + res.Add((attr.Name, "bool", "NOVALUE")); + } + } + else if (attr.Type == "shape") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Shape) + { + if (attr.DefaultValue.Shape.UnknownRank) + { + res.Add((attr.Name, "Shape", $"null")); + } + else + { + Shape shape = new Shape(attr.DefaultValue.Shape); + string expression = $"new Shape({string.Join(", ", shape.dims)})"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "Shape", $"null")); + } + } + else + { + res.Add((attr.Name, "Shape", "NOVALUE")); + } + } + else if (attr.Type == "list(type)") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.Type) + { + values.Add(value.as_tf_dtype()); + } + string expression = "new TF_DataType[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "TF_DataType[]", $"null")); + } + else + { + res.Add((attr.Name, "TF_DataType[]", "NOVALUE")); + } + } + else if (attr.Type == "list(shape)") + { + res.Add((attr.Name, "Shape[]", "NOVALUE")); + } + else if (attr.Type == "list(string)") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.S) + { + values.Add(value.ToStringUtf8()); + } + string expression = "new string[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "string[]", $"null")); + } + else + { + res.Add((attr.Name, "string[]", "NOVALUE")); + } + } + else if (attr.Type == "list(int)") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.List) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.I) + { + values.Add((int)value); + } + string expression = "new int[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "int[]", $"null")); + } + else + { + res.Add((attr.Name, "int[]", "NOVALUE")); + } + } + else if (attr.Type == "list(float)") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase 
== AttrValue.ValueOneofCase.List) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.F) + { + values.Add(value); + } + string expression = "new float[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "float[]", $"null")); + } + else + { + res.Add((attr.Name, "float[]", "NOVALUE")); + } + } + else if (attr.Type == "func") + { + res.Add((attr.Name, "Func", "NOVALUE")); + } + else if (attr.Type == "list(func)") + { + res.Add((attr.Name, "Func[]", "NOVALUE")); + } + else if (attr.Type == "tensor") + { + res.Add((attr.Name, "TensorProto", "NOVALUE")); + } + else + { + throw new NotImplementedException(); + } + } + return res; + } } } From 1c8f0a2d14df2f0c335e378ae16cc6c8ba222aa4 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Mon, 8 May 2023 02:00:08 +0800 Subject: [PATCH 006/182] refactor: gen_nn_ops, gen_math_ops, gen_array_ops and related codes. --- src/TensorFlowNET.Console/MemoryBasicTest.cs | 4 +- src/TensorFlowNET.Core/APIs/tf.array.cs | 16 +- src/TensorFlowNET.Core/APIs/tf.math.cs | 39 +- src/TensorFlowNET.Core/APIs/tf.nn.cs | 25 +- src/TensorFlowNET.Core/APIs/tf.reshape.cs | 2 +- src/TensorFlowNET.Core/APIs/tf.tensor.cs | 8 +- src/TensorFlowNET.Core/APIs/tf.tile.cs | 2 +- .../Attributes/c_api.ops.cs | 15 + .../_InitializeClustersOpFactory.cs | 2 +- .../Contexts/Context.ExecuteOp.cs | 2 +- .../Eager/EagerRunner.TFE_FastPathExecute.cs | 4 +- .../Eager/FastPathOpExecInfo.cs | 3 +- src/TensorFlowNET.Core/Eager/execute.cs | 12 +- .../Functions/EagerDefinedFunction.cs | 2 +- .../Gradients/GradientTape.cs | 9 + .../Gradients/array_grad.cs | 15 +- src/TensorFlowNET.Core/Gradients/math_grad.cs | 19 +- .../Gradients/math_grad_eager.cs | 4 +- src/TensorFlowNET.Core/Gradients/nn_grad.cs | 48 +- .../Operations/NnOps/AveragePoolFunction.cs | 2 +- .../Operations/NnOps/ConvolutionInternal.cs | 38 +- .../Operations/NnOps/gen_nn_ops.cs | 373 - .../Operations/OpDefLibrary.cs | 5 + .../Operations/Operation.cs | 64 +- .../Operations/array_ops.cs | 86 +- .../Operations/dataset_ops.cs | 4 +- .../Operations/gen_array_ops.cs | 10688 +++++++++++++++- .../Operations/gen_functional_ops.cs | 12 +- .../Operations/gen_io_ops.cs | 1378 ++ .../Operations/gen_logging_ops.cs | 2 +- .../Operations/gen_math_ops.cs | 10018 ++++++++++++++- .../Operations/gen_math_ops.eager.cs | 11 - .../Operations/gen_nn_ops.cs | 8084 ++++++++++++ src/TensorFlowNET.Core/Operations/gen_ops.cs | 22 +- .../Operations/gen_resource_variable_ops.cs | 10 +- .../Operations/image_ops_impl.cs | 26 +- src/TensorFlowNET.Core/Operations/io_ops.cs | 6 +- src/TensorFlowNET.Core/Operations/math_ops.cs | 45 +- .../Operations/nn_impl.py.cs | 2 +- src/TensorFlowNET.Core/Operations/nn_ops.cs | 11 +- .../Tensors/Ragged/RowPartition.cs | 2 +- .../Tensors/Tensor.Operators.cs | 176 +- src/TensorFlowNET.Core/Tensors/Tensors.cs | 3 + .../Training/Saving/BaseSaverBuilder.cs | 2 +- .../DataAdapters/TensorLikeDataAdapter.cs | 5 +- src/TensorFlowNET.Keras/Layers/Core/Dense.cs | 2 +- src/TensorFlowNET.Keras/Losses/Huber.cs | 2 +- src/TensorFlowNET.Keras/Losses/LogCosh.cs | 3 +- .../Losses/MeanAbsoluteError.cs | 2 +- .../Losses/MeanAbsolutePercentageError.cs | 2 +- .../Losses/MeanSquaredError.cs | 2 +- .../Losses/MeanSquaredLogarithmicError.cs | 10 +- .../ControlFlowTest/WhileContextTestCase.cs | 4 +- .../GradientTest/GradientTest.cs | 2 +- .../ManagedAPI/ArrayOpsTest.cs | 6 +- 55 files changed, 29617 insertions(+), 1724 deletions(-) delete mode 100644 
src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs create mode 100644 src/TensorFlowNET.Core/Operations/gen_io_ops.cs delete mode 100644 src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs create mode 100644 src/TensorFlowNET.Core/Operations/gen_nn_ops.cs diff --git a/src/TensorFlowNET.Console/MemoryBasicTest.cs b/src/TensorFlowNET.Console/MemoryBasicTest.cs index 3b0deeabb..2bb11a02d 100644 --- a/src/TensorFlowNET.Console/MemoryBasicTest.cs +++ b/src/TensorFlowNET.Console/MemoryBasicTest.cs @@ -112,7 +112,7 @@ public Action Conv2DWithTensor var strides = new[] { 1, 1, 1, 1 }; var dilations = new[] { 1, 1, 1, 1 }; - var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("Conv2D", null, input, filter) + var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "Conv2D", null, input, filter) { attrs = ConvertToDict(new { @@ -134,7 +134,7 @@ public Action Conv2DWithVariable var strides = new[] { 1, 1, 1, 1 }; var dilations = new[] { 1, 1, 1, 1 }; - var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("Conv2D", null, input, filter) + var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "Conv2D", null, input, filter) { attrs = ConvertToDict(new { diff --git a/src/TensorFlowNET.Core/APIs/tf.array.cs b/src/TensorFlowNET.Core/APIs/tf.array.cs index a2c91983e..6a646512a 100644 --- a/src/TensorFlowNET.Core/APIs/tf.array.cs +++ b/src/TensorFlowNET.Core/APIs/tf.array.cs @@ -44,7 +44,8 @@ public partial class tensorflow /// /// public Tensor batch_to_space_nd(T input, int[] block_shape, int[,] crops, string name = null) - => gen_array_ops.batch_to_space_nd(input, block_shape, crops, name: name); + => gen_array_ops.batch_to_space_nd(ops.convert_to_tensor(input), ops.convert_to_tensor(block_shape), + ops.convert_to_tensor(crops), name: name); /// /// Apply boolean mask to tensor. @@ -91,7 +92,7 @@ public Tensor concat(IEnumerable values, int axis, string name = "concat }); } - return gen_array_ops.concat_v2(values.ToArray(), axis, name: name); + return gen_array_ops.concat_v2(values.ToArray(), ops.convert_to_tensor(axis), name: name); } /// @@ -115,7 +116,7 @@ public Tensor expand_dims(Tensor input, int axis = -1, string name = null) /// /// public Tensor fill(Tensor dims, T value, string name = null) - => gen_array_ops.fill(dims, value, name: name); + => gen_array_ops.fill(dims, ops.convert_to_tensor(value), name: name); public Tensor fill(Shape dims, T value, string name = null) => array_ops.fill(dims, value, name: name); @@ -138,7 +139,7 @@ public Tensor identity(Tensor input, string name = null) /// /// public Tensor gather(Tensor @params, Tensor indices, string name = null, int axis = 0) - => array_ops.gather(@params, indices, name: name, axis: axis); + => array_ops.gather(@params, indices, name: name, axis: ops.convert_to_tensor(axis)); /// /// Return the elements, either from `x` or `y`, depending on the `condition`. @@ -166,7 +167,7 @@ public Tensor transpose(T1 a, Axis perm = null, string name = "transpose", b /// /// public Tensor reverse(Tensor tensor, int[] axis, string name = null) - => gen_array_ops.reverse(tensor, axis, name: name); + => gen_array_ops.reverse(tensor, ops.convert_to_tensor(axis), name: name); public Tensor reverse(Tensor tensor, Tensor axis, string name = null) => gen_array_ops.reverse(tensor, axis, name: name); @@ -189,7 +190,8 @@ public Tensor rank(Tensor input, string name = null) /// A name for the operation (optional). /// A `Tensor` the same type as `input`. 
public Tensor slice(Tensor input, Tb[] begin, Ts[] size, string name = null) - => array_ops.slice(input, begin, size, name: name); + => array_ops.slice(input, begin.Select(x => ops.convert_to_tensor(x)).ToArray(), + size.Select(x => ops.convert_to_tensor(x)).ToArray(), name: name); public Tensor squeeze(Tensor input, int axis, string name = null, int squeeze_dims = -1) => array_ops.squeeze(input, new[] { axis }, name); @@ -255,7 +257,7 @@ public Tensor pad(Tensor tensor, Tensor paddings, string mode = "CONSTANT", stri /// A name for the operation (optional). /// A `Tensor`. Has the same type as `input`. public Tensor placeholder_with_default(T input, int[] shape, string name = null) - => gen_array_ops.placeholder_with_default(input, shape, name: name); + => gen_array_ops.placeholder_with_default(ops.convert_to_tensor(input), shape, name: name); /// /// Returns the shape of a tensor. diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs index 83653c8bb..75253700a 100644 --- a/src/TensorFlowNET.Core/APIs/tf.math.cs +++ b/src/TensorFlowNET.Core/APIs/tf.math.cs @@ -130,7 +130,7 @@ public Tensor add(Tensor a, Tensor b, string name = null) => gen_math_ops.add(a, b, name: name); public Tensor add(Tx a, Ty b, string name = null) - => gen_math_ops.add(a, b, name: name); + => gen_math_ops.add(ops.convert_to_tensor(a), ops.convert_to_tensor(b), name: name); /// /// Adds all input tensors element-wise. @@ -151,10 +151,10 @@ public Tensor atan(Tensor x, string name = null) => gen_math_ops.atan(x, name); public Tensor arg_max(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null) - => gen_math_ops.arg_max(input, dimension, output_type: output_type, name: name); + => gen_math_ops.arg_max(input, ops.convert_to_tensor(dimension), output_type: output_type, name: name); public Tensor arg_min(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null) - => gen_math_ops.arg_min(input, dimension, output_type: output_type, name: name); + => gen_math_ops.arg_min(input, ops.convert_to_tensor(dimension), output_type: output_type, name: name); public Tensor is_finite(Tensor input, string name = null) => gen_math_ops.is_finite(input, name); @@ -199,7 +199,7 @@ public Tensor cos(Tensor x, string name = null) => gen_math_ops.cos(x, name); public Tensor cos(float x, string name = null) - => gen_math_ops.cos(x, name); + => gen_math_ops.cos(ops.convert_to_tensor(x), name); /// /// Computes hyperbolic cosine of x element-wise. @@ -235,7 +235,7 @@ public Tensor floor(Tensor x, string name = null) /// /// public Tensor greater(Tx x, Ty y, string name = null) - => gen_math_ops.greater(x, y, name); + => gen_math_ops.greater(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); /// /// Returns the truth value of (x >= y) element-wise. @@ -247,7 +247,7 @@ public Tensor greater(Tx x, Ty y, string name = null) /// /// public Tensor greater_equal(Tx x, Ty y, string name = null) - => gen_math_ops.greater_equal(x, y, name); + => gen_math_ops.greater_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); /// /// Returns the truth value of (x < y) element-wise. @@ -259,7 +259,7 @@ public Tensor greater_equal(Tx x, Ty y, string name = null) /// /// public Tensor less(Tx x, Ty y, string name = null) - => gen_math_ops.less(x, y, name); + => gen_math_ops.less(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); /// /// Computes the log of the absolute value of `Gamma(x)` element-wise. 
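Taken together, these hunks follow one pattern: the generic convenience overloads in tf.math.cs and tf.array.cs now route every non-Tensor argument through ops.convert_to_tensor before reaching the regenerated gen_math_ops / gen_array_ops bindings, which only accept Tensor parameters. A minimal usage sketch of how a scalar flows through the new overloads (illustrative only; the values are made up):

    using static Tensorflow.Binding;

    // The scalar 2 and the axis 0 are wrapped by ops.convert_to_tensor inside the
    // overloads, so the calls reach gen_math_ops.greater(Tensor, Tensor) and
    // gen_math_ops.arg_max(Tensor, Tensor, ...).
    var x = tf.constant(new[] { 1, 5, 3 });
    var mask = tf.greater(x, 2);    // { false, true, true }
    var idx = tf.arg_max(x, 0);     // 1 (index of the largest element)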
@@ -280,7 +280,7 @@ public Tensor lgamma(Tensor x, string name = null) /// /// public Tensor less_equal(Tx x, Ty y, string name = null) - => gen_math_ops.less_equal(x, y, name); + => gen_math_ops.less_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); /// /// Computes natural logarithm of (1 + x) element-wise. @@ -292,7 +292,7 @@ public Tensor log1p(Tensor x, string name = null) => gen_math_ops.log1p(x, name); public Tensor logical_and(T x, T y, string name = null) - => gen_math_ops.logical_and(x, y, name); + => gen_math_ops.logical_and(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); public Tensor logical_not(Tensor x, string name = null) => gen_math_ops.logical_not(x, name); @@ -301,7 +301,10 @@ public Tensor logical_or(Tensor x, Tensor y, string name = null) => gen_math_ops.logical_or(x, y, name); public Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor") - => gen_math_ops.logical_xor(x, y, name); + { + return gen_math_ops.logical_and(gen_math_ops.logical_or(x, y), + gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)), name); + } /// /// Clips tensor values to a specified min and max. @@ -312,7 +315,7 @@ public Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor") /// /// public Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null) - => gen_math_ops._clip_by_value(t, clip_value_min, clip_value_max); + => gen_math_ops.clip_by_value(t, clip_value_min, clip_value_max); /// /// Clips tensor values to a specified min and max. @@ -345,7 +348,7 @@ public Tensor clip_by_value(Tensor t, T1 clip_value_min, T2 clip_value_m => clip_ops.clip_by_value(t, clip_value_min, clip_value_max, name); public Tensor sub(Tx a, Ty b, string name = null) - => gen_math_ops.sub(a, b, name: name); + => gen_math_ops.sub(ops.convert_to_tensor(a), ops.convert_to_tensor(b), name: name); public Tensor divide(Tensor a, Tensor b) => a / b; @@ -396,7 +399,7 @@ public Tensor atan2(Tensor y, Tensor x, string name = null) /// /// public Tensor max(Tx input, Ty axis, bool keep_dims = false, string name = null) - => gen_math_ops._max(input, axis, keep_dims: keep_dims, name: name); + => gen_math_ops.max(ops.convert_to_tensor(input), ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name); /// /// Computes the minimum of elements across dimensions of a tensor. @@ -409,7 +412,7 @@ public Tensor max(Tx input, Ty axis, bool keep_dims = false, string name /// /// public Tensor min(Tx input, Ty axis, bool keep_dims = false, string name = null) - => gen_math_ops._min(input, axis, keep_dims: keep_dims, name: name); + => gen_math_ops.min(ops.convert_to_tensor(input), ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name); /// /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. @@ -421,7 +424,7 @@ public Tensor min(Tx input, Ty axis, bool keep_dims = false, string name /// /// public Tensor maximum(T1 x, T2 y, string name = null) - => gen_math_ops.maximum(x, y, name: name); + => gen_math_ops.maximum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); /// /// Returns the min of x and y (i.e. x < y ? x : y) element-wise. 
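The logical_xor rewrite above leans on the Boolean identity x XOR y = (x OR y) AND NOT (x AND y), presumably because ops.pbtxt exposes no LogicalXor kernel for the generator to bind. A quick illustrative check of the identity through the public API (expected values shown in the comment):

    using static Tensorflow.Binding;

    // Sketch only: exercises the composition now used by tf.logical_xor.
    var a = tf.constant(new[] { true, true, false, false });
    var b = tf.constant(new[] { true, false, true, false });
    var c = tf.logical_xor(a, b);   // expected: { false, true, true, false }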
@@ -433,7 +436,7 @@ public Tensor maximum(T1 x, T2 y, string name = null) /// /// public Tensor minimum(T1 x, T2 y, string name = null) - => gen_math_ops.minimum(x, y, name: name); + => gen_math_ops.minimum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public Tensor multiply(Tensor x, Tensor y, string name = null) => gen_math_ops.mul(x, y, name: name); @@ -448,7 +451,7 @@ public Tensor multiply(Tensor x, Tensor y, string name = null) /// /// public Tensor multiply(Tx x, Ty y, string name = null) - => gen_math_ops.mul(x, y, name: name); + => gen_math_ops.mul(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public Tensor negative(Tensor x, string name = null) => gen_math_ops.neg(x, name); @@ -577,7 +580,7 @@ public Tensor sigmoid(T x, string name = null) => math_ops.sigmoid(x, name: name); public Tensor sum(Tensor input, int axis, bool keep_dims = false, string name = null) - => gen_math_ops._sum(input, axis, keep_dims: keep_dims, name: name); + => gen_math_ops.sum(input, ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name); public Tensor reduce_mean(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null, int? reduction_indices = null) => math_ops.reduce_mean(input_tensor, axis: axis, keepdims: keepdims, name: name, reduction_indices: reduction_indices); diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs index 1595e52fc..e0c29bfa7 100644 --- a/src/TensorFlowNET.Core/APIs/tf.nn.cs +++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs @@ -29,21 +29,8 @@ public class nn_internal public Tensor conv2d(Tensor input, Tensor filter, int[] strides, string padding, bool use_cudnn_on_gpu = true, string data_format = "NHWC", int[] dilations = null, string name = null) { - var parameters = new Conv2dParams - { - Input = input, - Filter = filter, - Strides = strides, - Padding = padding, - UseCudnnOnGpu = use_cudnn_on_gpu, - DataFormat = data_format, - Name = name - }; - - if (dilations != null) - parameters.Dilations = dilations; - - return gen_nn_ops.conv2d(parameters); + return gen_nn_ops.conv2d(input, filter, strides, padding, use_cudnn_on_gpu, + data_format: data_format, dilations: dilations, name: name); } public Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = null) @@ -118,7 +105,7 @@ public Tensor embedding_lookup(Tensor @params, public IActivation softmax() => new softmax(); public Tensor tanh(Tensor x, string name = null) - => gen_nn_ops.tanh(x, name); + => gen_math_ops.tanh(x, name); public Tensor relu(Tensor features, string name = null) => gen_nn_ops.relu(features, name); @@ -146,14 +133,14 @@ public Tensor in_top_k(Tensor predictions, Tensor targets, int k, string name = => nn_ops.in_top_k(predictions, targets, k, name); public Tensor[] top_k(Tensor input, int k = 1, bool sorted = true, string name = null) - => gen_nn_ops.top_kv2(input, k: k, sorted: sorted, name: name); + => gen_nn_ops.top_kv2(input, k: ops.convert_to_tensor(k), sorted: sorted, name: name); public Tensor bias_add(Tensor value, IVariableV1 bias, string data_format = null, string name = null) { return tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => { name = scope; - return gen_nn_ops.bias_add(value, bias, data_format: data_format, name: name); + return gen_nn_ops.bias_add(value, ops.convert_to_tensor(bias), data_format: data_format, name: name); }); } @@ -172,7 +159,7 @@ public Tensor l2_loss(Tensor t, string name = null) /// public Tensor 
lrn(Tensor input, int depth_radius = 5, int bias = 1, int alpha = 1, float beta = 0.5f, string name = null) - => gen_nn_ops.local_response_normalization(input, depth_radius: depth_radius, bias: bias, + => gen_nn_ops.lrn(input, depth_radius: depth_radius, bias: bias, alpha: alpha, beta: beta, name: name); public Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null) diff --git a/src/TensorFlowNET.Core/APIs/tf.reshape.cs b/src/TensorFlowNET.Core/APIs/tf.reshape.cs index cdd5194a2..5da7b795f 100644 --- a/src/TensorFlowNET.Core/APIs/tf.reshape.cs +++ b/src/TensorFlowNET.Core/APIs/tf.reshape.cs @@ -31,6 +31,6 @@ public Tensor reshape(Tensor tensor, public Tensor reshape(Tensor tensor, object[] shape, string name = null) - => gen_array_ops.reshape(tensor, shape, name); + => gen_array_ops.reshape(tensor, ops.convert_to_tensor(shape), name); } } diff --git a/src/TensorFlowNET.Core/APIs/tf.tensor.cs b/src/TensorFlowNET.Core/APIs/tf.tensor.cs index 35efde06b..be8c2ab24 100644 --- a/src/TensorFlowNET.Core/APIs/tf.tensor.cs +++ b/src/TensorFlowNET.Core/APIs/tf.tensor.cs @@ -46,10 +46,10 @@ public Tensor strided_slice(Tensor input, T[] begin, T[] end, T[] strides = n int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, - string name = null) => gen_array_ops.strided_slice(input: input, - begin: begin, - end: end, - strides: strides, + string name = null) => array_ops.strided_slice(input, + begin: ops.convert_to_tensor(begin), + end: ops.convert_to_tensor(end), + strides: ops.convert_to_tensor(strides), begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, diff --git a/src/TensorFlowNET.Core/APIs/tf.tile.cs b/src/TensorFlowNET.Core/APIs/tf.tile.cs index be03e453c..65975ac83 100644 --- a/src/TensorFlowNET.Core/APIs/tf.tile.cs +++ b/src/TensorFlowNET.Core/APIs/tf.tile.cs @@ -23,7 +23,7 @@ public Tensor tile(Tensor input, Tensor multiples, string name = null) => gen_array_ops.tile(input, multiples, name); public Tensor tile(Tensor input, object[] multiples, string name = null) - => gen_array_ops.tile(input, multiples, name); + => gen_array_ops.tile(input, ops.convert_to_tensor(multiples), name); public Tensor tile(Tensor input, Shape multiples, string name = null) { diff --git a/src/TensorFlowNET.Core/Attributes/c_api.ops.cs b/src/TensorFlowNET.Core/Attributes/c_api.ops.cs index 2a22413b0..ba6f653a1 100644 --- a/src/TensorFlowNET.Core/Attributes/c_api.ops.cs +++ b/src/TensorFlowNET.Core/Attributes/c_api.ops.cs @@ -57,6 +57,21 @@ public partial class c_api [DllImport(TensorFlowLibName)] public static extern int TF_OperationGetAttrValueProto(IntPtr oper, string attr_name, SafeBufferHandle output_attr_value, SafeStatusHandle status); + [DllImport(TensorFlowLibName)] + public static extern void TF_OperationGetAttrType(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status); + + [DllImport(TensorFlowLibName)] + public static extern void TF_OperationGetAttrInt(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status); + + [DllImport(TensorFlowLibName)] + public static extern void TF_OperationGetAttrFloat(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status); + + [DllImport(TensorFlowLibName)] + public static extern void TF_OperationGetAttrBool(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status); + + [DllImport(TensorFlowLibName)] + public static extern void TF_OperationGetAttrShape(IntPtr oper, string attr_name, long[] value, int num_dims, SafeStatusHandle status); + 
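The new TF_OperationGetAttr* entry points give Operation cheap, typed access to scalar attributes; the matching managed wrappers (_get_attr_type, _get_attr_int, _get_attr_bool) appear in Operation.cs later in this patch. For illustration, a float variant would follow the same unsafe stack-pointer pattern; this is a hypothetical helper sketched after those wrappers, not part of the patch:

    // Hypothetical: mirrors _get_attr_int/_get_attr_bool; would sit alongside them in Operation.cs.
    internal unsafe float _get_attr_float(string name)
    {
        Status status = new();
        float result;
        // The native call writes the attribute value through the pointer.
        c_api.TF_OperationGetAttrFloat(_handle, name, new IntPtr(&result), status);
        status.Check(true);
        return result;
    }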
[DllImport(TensorFlowLibName)] public static extern void TF_SetAttrBool(IntPtr desc, string attr_name, bool value); diff --git a/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs b/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs index adb26ef29..1b295fcfd 100644 --- a/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs +++ b/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs @@ -88,7 +88,7 @@ private Tensor _initialize() public Tensor op() { - var x = control_flow_ops.cond(gen_math_ops.equal(_num_remaining, 0), + var x = control_flow_ops.cond(gen_math_ops.equal(_num_remaining, ops.convert_to_tensor(0)), () => { return check_ops.assert_equal(_cluster_centers_initialized, true); diff --git a/src/TensorFlowNET.Core/Contexts/Context.ExecuteOp.cs b/src/TensorFlowNET.Core/Contexts/Context.ExecuteOp.cs index ac1cd8660..f6e0911ca 100644 --- a/src/TensorFlowNET.Core/Contexts/Context.ExecuteOp.cs +++ b/src/TensorFlowNET.Core/Contexts/Context.ExecuteOp.cs @@ -49,7 +49,7 @@ Tensors ExecGraphAction(string OpType, string Name, ExecuteOpArgs args) Tensors ExecEagerAction(string OpType, string Name, ExecuteOpArgs args) { - var opExecInfo = new FastPathOpExecInfo(OpType, Name, args.OpInputArgs) + var opExecInfo = new FastPathOpExecInfo(tf.Context, OpType, Name, args.OpInputArgs) { attrs = args.OpAttrs }; diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs index fedc02cb9..f1a09ed7b 100644 --- a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs +++ b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs @@ -68,7 +68,8 @@ public Tensor[] TFE_FastPathExecute(FastPathOpExecInfo op_exec_info) var input_arg = op_def.InputArg[i]; if (!string.IsNullOrEmpty(input_arg.NumberAttr)) { - int len = (input as object[]).Length; + var fast_input_array = input is Tensors tensors ? (object[])tensors : (object[])input; + int len = fast_input_array.Length; c_api.TFE_OpSetAttrInt(op, input_arg.NumberAttr, len); if (op_exec_info.run_callbacks) { @@ -79,7 +80,6 @@ public Tensor[] TFE_FastPathExecute(FastPathOpExecInfo op_exec_info) if (len > 0) { - var fast_input_array = (object[])op_exec_info.args[i]; // First item adds the type attr. 
if (!AddInputToOp(fast_input_array[i], true, input_arg, flattened_attrs, flattened_inputs, op, status)) return null; diff --git a/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs b/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs index 2cdf025a1..307ca2ce4 100644 --- a/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs +++ b/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs @@ -17,8 +17,9 @@ public class FastPathOpExecInfo public bool run_callbacks { get; set; } public Action callbacks { get; set; } - public FastPathOpExecInfo(string opName, string name, params object[] inputArgs) + public FastPathOpExecInfo(Context ctx, string opName, string name, params object[] inputArgs) { + this.ctx = ctx; this.op_name = opName; this.name = name; this.args = inputArgs; diff --git a/src/TensorFlowNET.Core/Eager/execute.cs b/src/TensorFlowNET.Core/Eager/execute.cs index 1804992ac..e981c6c51 100644 --- a/src/TensorFlowNET.Core/Eager/execute.cs +++ b/src/TensorFlowNET.Core/Eager/execute.cs @@ -7,10 +7,11 @@ using static Tensorflow.ApiDef.Types; using static Tensorflow.CostGraphDef.Types; using static Tensorflow.Binding; +using Tensorflow.Gradients; namespace Tensorflow.Eager { - internal static class execute + internal static class _execute { public static (DataType[], Tensor[]) onvert_to_mixed_eager_tensors(Tensor[] values, Context ctx) { @@ -18,7 +19,7 @@ public static (DataType[], Tensor[]) onvert_to_mixed_eager_tensors(Tensor[] valu var types = v.Select(t => t.dtype.as_datatype_enum()); return (types.ToArray(), v.ToArray()); } - public static Tensor[] executes(string op_name, int num_outputs, Tensor[] inputs, object[] attrs, Context ctx, string name = null) + public static Tensor[] execute(string op_name, int num_outputs, Tensor[] inputs, object[] attrs, Context ctx, string name = null) { return quick_execute(op_name, num_outputs, inputs, attrs, ctx, name); } @@ -33,7 +34,12 @@ public static Tensor[] quick_execute(string op_name, int num_outputs, Tensor[] i } public static bool must_record_gradient() { - return false; + return tf.GetTapeSet().Count != 0; + } + + public static bool record_gradient(string op_name, Tensor[] inputs, object[] attrs, Tensor[] results) + { + return tf.Runner.RecordGradient(op_name, inputs, attrs, results); } } } diff --git a/src/TensorFlowNET.Core/Functions/EagerDefinedFunction.cs b/src/TensorFlowNET.Core/Functions/EagerDefinedFunction.cs index cc38683db..d547b6120 100644 --- a/src/TensorFlowNET.Core/Functions/EagerDefinedFunction.cs +++ b/src/TensorFlowNET.Core/Functions/EagerDefinedFunction.cs @@ -147,7 +147,7 @@ public unsafe Tensors Call(Tensors args) Tensor[] outputs; if (executing_eagerly) { - outputs = execute.executes( + outputs = _execute.execute( Signature.Name, _num_outputs, args, diff --git a/src/TensorFlowNET.Core/Gradients/GradientTape.cs b/src/TensorFlowNET.Core/Gradients/GradientTape.cs index b5fd373e9..a714436a3 100644 --- a/src/TensorFlowNET.Core/Gradients/GradientTape.cs +++ b/src/TensorFlowNET.Core/Gradients/GradientTape.cs @@ -44,6 +44,15 @@ public ITape PushTape(bool persistent = false, return tape; } + public void PushTape(ITape tape) + { + // Enters a context inside which operations are recorded on this tape. 
+ if (tf.Context.executing_eagerly()) + tf.Context.ensure_initialized(); + + _tapeSet.Push(tape); + } + ITape PopTape() { _tape.StopRecord(); diff --git a/src/TensorFlowNET.Core/Gradients/array_grad.cs b/src/TensorFlowNET.Core/Gradients/array_grad.cs index c4cb9fbd1..f939f7b69 100644 --- a/src/TensorFlowNET.Core/Gradients/array_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/array_grad.cs @@ -36,8 +36,7 @@ public static Tensor[] _BroadcastToGrad(Operation op, Tensor[] grads) var input_value = op.inputs[0]; var broadcast_shape = op.inputs[1]; var input_value_shape = array_ops.shape(input_value); - var (_, reduction_axes) = gen_array_ops.broadcast_gradient_args(broadcast_shape, - input_value_shape); + var reduction_axes = gen_array_ops.broadcast_gradient_args(broadcast_shape, input_value_shape)[1]; var updates_grad_reshaped = math_ops.reduce_sum(grad, axis: reduction_axes, keepdims: true); @@ -351,16 +350,16 @@ public static Tensor[] _StridedSliceGradGrad(Operation op, Tensor[] grads) null, null, null, - gen_array_ops.strided_slice( + array_ops.strided_slice( grad, begin, end, strides, - begin_mask: op.get_attr("begin_mask"), - end_mask: op.get_attr("end_mask"), - ellipsis_mask: op.get_attr("ellipsis_mask"), - new_axis_mask: op.get_attr("new_axis_mask"), - shrink_axis_mask: op.get_attr("shrink_axis_mask")) + begin_mask: (int)op.get_attr("begin_mask"), + end_mask: (int)op.get_attr("end_mask"), + ellipsis_mask: (int)op.get_attr("ellipsis_mask"), + new_axis_mask: (int)op.get_attr("new_axis_mask"), + shrink_axis_mask: (int)op.get_attr("shrink_axis_mask")) }; } diff --git a/src/TensorFlowNET.Core/Gradients/math_grad.cs b/src/TensorFlowNET.Core/Gradients/math_grad.cs index 89699d6bc..be1fbbba7 100644 --- a/src/TensorFlowNET.Core/Gradients/math_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/math_grad.cs @@ -53,7 +53,8 @@ public static Tensor[] _AddGrad(Operation op, Tensor[] grads) var sx = array_ops.shape(x); var sy = array_ops.shape(y); - var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); + var args = gen_array_ops.broadcast_gradient_args(sx, sy); + var (rx, ry) = (args[0], args[1]); var sum1 = math_ops.reduce_sum(grad, rx); var r1 = gen_array_ops.reshape(sum1, sx); @@ -101,7 +102,8 @@ public static Tensor[] _DivNoNanGrad(Operation op, Tensor[] grads) var y = op.inputs[1]; var sx = array_ops.shape(x); var sy = array_ops.shape(y); - var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); + var args = gen_array_ops.broadcast_gradient_args(sx, sy); + var (rx, ry) = (args[0], args[1]); x = math_ops.conj(x); y = math_ops.conj(y); @@ -427,7 +429,8 @@ private static Tensor[] _MaximumMinimumGrad(bool isMaximum, Operation op, Tensor isMaximum ? 
gen_math_ops.greater_equal(x, y) : gen_math_ops.less_equal(x, y); - var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); + var args = gen_array_ops.broadcast_gradient_args(sx, sy); + var (rx, ry) = (args[0], args[1]); var xgrad = array_ops.where(xmask, grad, zeros); var gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx); var ygrad = array_ops.where(xmask, zeros, grad); @@ -458,7 +461,7 @@ public static Tensor[] _SelectGrad(Operation op, Tensor[] grads) private static Tensor _safe_shape_div(Tensor x, Tensor y) { - return math_ops.floordiv(x, gen_math_ops.maximum(y, 1)); + return math_ops.floordiv(x, gen_math_ops.maximum(y, ops.convert_to_tensor(1))); } [RegisterGradient("Sub")] @@ -573,7 +576,8 @@ public static Tensor[] _RealDivGrad(Operation op, Tensor[] grads) var sx = array_ops.shape(x); var sy = array_ops.shape(y); - var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); + var args = gen_array_ops.broadcast_gradient_args(sx, sy); + var (rx, ry) = (args[0], args[1]); x = math_ops.conj(x); y = math_ops.conj(y); @@ -824,7 +828,7 @@ public static Tensor[] _PowGrad(Operation op, Tensor[] grads) mask = x > 0.0f; var ones = array_ops.ones_like(x); var safe_x = array_ops.where(mask, x, ones); - var x1 = gen_array_ops.log(safe_x); + var x1 = math_ops.log(safe_x); var y1 = array_ops.zeros_like(x); var log_x = array_ops.where(mask, x1, y1); var mul1 = grad * z * log_x; @@ -855,7 +859,8 @@ public static (Tensor, Tensor, bool)[] SmartBroadcastGradientArgs(Tensor x, Tens sy = array_ops.shape_internal(y, optimize: false); } - var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); + var args = gen_array_ops.broadcast_gradient_args(sx, sy); + var (rx, ry) = (args[0], args[1]); return new[] { (sx, rx, !x.shape.Equals(grad.shape)), diff --git a/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs b/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs index 530bb6c08..f8b16090f 100644 --- a/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs +++ b/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs @@ -47,8 +47,8 @@ public static Tensor[] _MulGrad(EagerOperation op, IntPtr[] grads) { return new Tensor[] { - gen_math_ops.mul(grad, y), - gen_math_ops.mul(grad, x) + math_ops.multiply(grad, y), + math_ops.multiply(grad, x) }; } diff --git a/src/TensorFlowNET.Core/Gradients/nn_grad.cs b/src/TensorFlowNET.Core/Gradients/nn_grad.cs index e95163930..a1ac97a97 100644 --- a/src/TensorFlowNET.Core/Gradients/nn_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/nn_grad.cs @@ -192,17 +192,8 @@ public static Tensor[] _Conv2DBackpropInputGrad(Operation op, Tensor[] grads) explicit_paddings: explicit_paddings, dilations: dilations, data_format: data_format), - gen_nn_ops.conv2d(new Conv2dParams - { - Input = grad, - Filter = op.inputs[1], - Strides = strides, - Padding = padding, - DataFormat = data_format, - Dilations = dilations, - ExplicitPaddings = explicit_paddings, - UseCudnnOnGpu = use_cudnn_on_gpu - }) + gen_nn_ops.conv2d(grad, op.inputs[1], strides, padding, + use_cudnn_on_gpu, explicit_paddings, data_format, dilations) }; } @@ -265,20 +256,27 @@ public static Tensor[] _BaseFusedBatchNormGrad(Operation op, int version, Tensor var epsilon = op.get_attr("epsilon"); var data_format = op.get_attr("data_format"); var is_training = op.get_attr("is_training"); - Func grad_fun = null; - - switch (version) + Func grad_fun = (p) => { - case 2: - grad_fun = gen_nn_ops.fused_batch_norm_grad_v3; - break; - case 1: - // grad_fun = gen_nn_ops.fused_batch_norm_grad_v2; - throw new 
NotImplementedException(""); - default: - grad_fun = gen_nn_ops.fused_batch_norm_grad; - break; - } + if(version == 2) + { + return gen_nn_ops.fused_batch_norm_grad_v3(p.YBackprop, p.X, p.Scale, + p.ReserveSpace1, p.ReserveSpace2, p.ReserveSpace3, p.Epsilon, + p.DataFormat, p.IsTraining, p.Name); + } + else if(version == 1) + { + return gen_nn_ops.fused_batch_norm_grad_v2(p.YBackprop, p.X, p.Scale, + p.ReserveSpace1, p.ReserveSpace2, p.Epsilon, p.DataFormat, + p.IsTraining, p.Name); + } + else + { + return gen_nn_ops.fused_batch_norm_grad(p.YBackprop, p.X, p.Scale, + p.ReserveSpace1, p.ReserveSpace2, p.Epsilon, p.DataFormat, + p.IsTraining, p.Name); + } + }; if (is_training) { @@ -406,7 +404,7 @@ public static Tensor[] _TopKGrad(Operation op, Tensor[] grads) // finally reshaping it to the original input shape. var scatter = gen_array_ops.scatter_nd(array_ops.expand_dims(ind, -1), array_ops.reshape(grad, new int[] { -1 }), - new Tensor[] { math_ops.reduce_prod(in_shape) }); + math_ops.reduce_prod(in_shape)); return new Tensor[] { diff --git a/src/TensorFlowNET.Core/Operations/NnOps/AveragePoolFunction.cs b/src/TensorFlowNET.Core/Operations/NnOps/AveragePoolFunction.cs index d43f8a0c8..84ce56a4b 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/AveragePoolFunction.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/AveragePoolFunction.cs @@ -34,7 +34,7 @@ public Tensor Apply(Tensor value, { name = scope; value = ops.convert_to_tensor(value, name: "input"); - return gen_nn_ops.average_pool( + return gen_nn_ops.avg_pool( value, ksize: ksize, strides: strides, diff --git a/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs b/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs index 958d79f42..ec70b1858 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs @@ -67,16 +67,15 @@ public Tensor Apply(Tensors input, Tensor filters) var dilations = _get_sequence(args.DilationRate, num_spatial_dims, channel_index).ToArray(); var strides = _get_sequence(args.Strides, num_spatial_dims, channel_index).ToArray(); - result = gen_nn_ops.conv2d(new Conv2dParams - { - Input = input, - Filter = filters, - Strides = strides, - Padding = padding, - DataFormat = data_format, - Dilations = dilations, - Name = name - }); + result = gen_nn_ops.conv2d( + input, + filters, + strides, + padding, + data_format: data_format, + dilations: dilations, + name: name + ); } else { @@ -93,16 +92,15 @@ public Tensor Apply(Tensors input, Tensor filters) input = array_ops.expand_dims(input, spatial_start_dim); filters = array_ops.expand_dims(filters, 0); - result = gen_nn_ops.conv2d(new Conv2dParams - { - Input = input, - Filter = filters, - Strides = strides.ToArray(), - Padding = padding, - DataFormat = channel_first ? "NCHW" : "NHWC", - Dilations = dilations.ToArray(), - Name = name - }); + result = gen_nn_ops.conv2d( + input, + filters, + strides.ToArray(), + padding, + data_format: channel_first ? 
"NCHW" : "NHWC", + dilations: dilations.ToArray(), + name: name + ); result = array_ops.squeeze(result, new[] { spatial_start_dim }); } }); diff --git a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs deleted file mode 100644 index 408d06ebf..000000000 --- a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs +++ /dev/null @@ -1,373 +0,0 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using System.Linq; -using static Tensorflow.Binding; - -namespace Tensorflow.Operations -{ - public class gen_nn_ops - { - /// - /// Computes a 2-D convolution given 4-D `input` and `filter` tensors. - /// - /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]` - /// and a filter / kernel tensor of shape - /// `[filter_height, filter_width, in_channels, out_channels]`, this op - /// performs the following: - /// - /// 1. Flattens the filter to a 2-D matrix with shape - /// `[filter_height * filter_width * in_channels, output_channels]`. - /// 2. Extracts image patches from the input tensor to form a *virtual* - /// tensor of shape `[batch, out_height, out_width, - /// filter_height * filter_width * in_channels]`. - /// 3. For each patch, right-multiplies the filter matrix and the image patch - /// vector. - /// - /// - /// - public static Tensor conv2d(Conv2dParams parameters) - => tf.Context.ExecuteOp("Conv2D", parameters.Name, new ExecuteOpArgs(parameters.Input, parameters.Filter) - .SetAttributes(new - { - strides = parameters.Strides, - padding = parameters.Padding, - use_cudnn_on_gpu = parameters.UseCudnnOnGpu, - explicit_paddings = parameters.ExplicitPaddings, - data_format = parameters.DataFormat, - dilations = parameters.Dilations - })); - - /// - /// Computes the gradients of convolution with respect to the filter. - /// - /// - /// - public static Tensor conv2d_backprop_filter(Tensor input, Tensor filter_sizes, Tensor out_backprop, - int[] strides, string padding, bool use_cudnn_on_gpu = true, - int[] explicit_paddings = null, - string data_format = "NHWC", - int[] dilations = null, - string name = null) - => tf.Context.ExecuteOp("Conv2DBackpropFilter", name, new ExecuteOpArgs(input, filter_sizes, out_backprop) - .SetAttributes(new - { - strides, - padding, - use_cudnn_on_gpu, - explicit_paddings = explicit_paddings ?? new int[0], - data_format, - dilations = dilations ?? new int[] { 1, 1, 1, 1 } - })); - - /// - /// Computes the gradients of convolution with respect to the input. 
- /// - /// - /// - public static Tensor conv2d_backprop_input(Tensor input_sizes, Tensor filter, Tensor out_backprop, - int[] strides, string padding, bool use_cudnn_on_gpu = true, - int[] explicit_paddings = null, - string data_format = "NHWC", - int[] dilations = null, - string name = null) - => tf.Context.ExecuteOp("Conv2DBackpropInput", name, new ExecuteOpArgs(input_sizes, filter, out_backprop) - .SetAttributes(new - { - strides, - padding, - use_cudnn_on_gpu, - explicit_paddings = explicit_paddings ?? new int[0], - data_format, - dilations = dilations ?? new int[] { 1, 1, 1, 1 } - })); - - public static Tensor bias_add(Tensor value, - IVariableV1 bias, - string data_format = null, - string name = null) - => tf.Context.ExecuteOp("BiasAdd", name, new ExecuteOpArgs(value, bias) - .SetAttributes(new { data_format = data_format ?? "NHWC" })); - - public static Tensor bias_add_grad(Tensor out_backprop, - string data_format = "NHWC", - string name = null) - => tf.Context.ExecuteOp("BiasAddGrad", name, new ExecuteOpArgs(out_backprop) - .SetAttributes(new { data_format = data_format ?? "NHWC" })); - - /// - /// Computes exponential linear: exp(features) - 1 if &lt; 0, features otherwise. - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Elu'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) - /// ](http://arxiv.org/abs/1511.07289) - /// - public static Tensor elu(Tensor features, string name = "Elu") - { - var op = tf.OpDefLib._apply_op_helper("Elu", name: name, args: new { features }); - return op.output; - } - - /// - /// Gradient for batch normalization. 
- /// - /// - /// - public static Tensor[] fused_batch_norm_grad(FusedBatchNormParams @params) - { - var op = tf.OpDefLib._apply_op_helper("FusedBatchNormGrad", name: @params.Name, args: new - { - y_backprop = @params.YBackprop, - x = @params.X, - scale = @params.Scale, - reserve_space_1 = @params.ReserveSpace1, - reserve_space_2 = @params.ReserveSpace2, - epsilon = @params.Epsilon, - data_format = @params.DataFormat, - is_training = @params.IsTraining - }); - return op.outputs; - } - - public static Tensor[] fused_batch_norm_grad_v3(FusedBatchNormParams @params) - => tf.Context.ExecuteOp("FusedBatchNormGradV3", @params.Name, - new ExecuteOpArgs(@params.YBackprop, - @params.X, - @params.Scale, - @params.ReserveSpace1, - @params.ReserveSpace2, - @params.ReserveSpace3) - .SetAttributes(new - { - epsilon = @params.Epsilon, - data_format = @params.DataFormat, - is_training = @params.IsTraining - })); - - public static Tensor[] fused_batch_norm(Tensor x, - Tensor scale, - Tensor offset, - Tensor mean, - Tensor variance, - float epsilon = 0.0001f, - string data_format = "NHWC", - bool is_training = true, - string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("FusedBatchNorm", name: name, args: new - { - x, - scale, - offset, - mean, - variance, - epsilon, - data_format, - is_training - }); - - return _op.outputs; - } - - public static Tensors fused_batch_norm_v3(Tensor x, - Tensor scale, - Tensor offset, - Tensor mean, - Tensor variance, - float epsilon = 0.0001f, - float exponential_avg_factor = 1.0f, - string data_format = "NHWC", - bool is_training = true, - string name = null) - => tf.Context.ExecuteOp("FusedBatchNormV3", name, new ExecuteOpArgs(x, scale, offset, mean, variance) - .SetAttributes(new { epsilon, data_format, is_training })); - - /// - /// Local Response Normalization. - /// - /// - /// - /// - /// - /// - /// - /// - public static Tensor local_response_normalization(Tensor input, int depth_radius = 5, int bias = 1, - int alpha = 1, float beta = 0.5f, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("LRN", name: name, args: new - { - input, - depth_radius, - bias, - alpha, - beta - }); - - return _op.output; - } - - public static Tensor log_softmax(Tensor logits, string name = null) - => tf.Context.ExecuteOp("LogSoftmax", name, new ExecuteOpArgs(logits)); - - /// - /// Says whether the targets are in the top `K` predictions. - /// - /// - /// - /// - /// - /// A `Tensor` of type `bool`. 
- public static Tensor in_top_kv2(Tensor predictions, Tensor targets, int k, string name = null) - => tf.Context.ExecuteOp("InTopKV2", name, - new ExecuteOpArgs(predictions, targets, k)); - - public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null) - => tf.Context.ExecuteOp("LeakyRelu", name, - new ExecuteOpArgs(features).SetAttributes(new { alpha })); - - public static Tensor average_pool(Tensor input, - int[] ksize, - int[] strides, - string padding, - string data_format = "NHWC", - string name = null) - => tf.Context.ExecuteOp("AvgPool", name, new ExecuteOpArgs(input) - .SetAttributes(new - { - ksize, - strides, - padding, - data_format - })); - - public static Tensor max_pool(Tensor input, - int[] ksize, - int[] strides, - string padding, - string data_format = "NHWC", - string name = null) - => tf.Context.ExecuteOp("MaxPool", name, new ExecuteOpArgs(input) - .SetAttributes(new - { - ksize, - strides, - padding, - data_format - })); - - public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, - string data_format = "NHWC", string name = null) - => tf.Context.ExecuteOp("MaxPoolGrad", name, new ExecuteOpArgs(orig_input, orig_output, grad) - .SetAttributes(new - { - ksize, - strides, - padding, - data_format - })); - - public static Tensor[] top_kv2(Tensor input, T k, bool sorted = true, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("TopKV2", name: name, args: new - { - input, - k, - sorted - }); - - return _op.outputs; - } - - public static Tensor relu_grad(Tensor gradients, Tensor features, string name = null) - => tf.Context.ExecuteOp("ReluGrad", name, new ExecuteOpArgs(gradients, features)); - - public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string name = null) - => tf.Context.ExecuteOp("LeakyReluGrad", name, new ExecuteOpArgs(gradients, features) - .SetAttributes(new { alpha })); - - public static Tensor softmax(Tensor logits, string name = null) - => tf.Context.ExecuteOp("Softmax", name, new ExecuteOpArgs(logits)); - - /// - /// Computes softmax cross entropy cost and gradients to backpropagate. - /// - /// - /// - /// - /// - public static (Tensor, Tensor) softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = null) - { - var results = tf.Context.ExecuteOp("SoftmaxCrossEntropyWithLogits", name, new ExecuteOpArgs(features, labels)); - - return (results[0], results[1]); - } - - /// - /// Computes softmax cross entropy cost and gradients to backpropagate. - /// - /// - /// batch_size x num_classes matrix - /// - /// - /// batch_size vector with values in [0, num_classes). - /// This is the label for the given minibatch entry. - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSoftmaxCrossEntropyWithLogits'. - /// - /// - /// Returns a tuple with multiple values, as follows: - /// loss : Per example loss (batch_size vector). - /// backprop : backpropagated gradients (batch_size x num_classes matrix). - /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. - /// - /// - /// Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept - /// a matrix of label probabilities, but rather a single label per row - /// of features. This label is considered to have probability 1.0 for the - /// given row. - /// - /// Inputs are the logits, not probabilities. 
- /// - public static (Tensor loss, Tensor backprop) sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = "SparseSoftmaxCrossEntropyWithLogits") - { - var results = tf.Context.ExecuteOp("SparseSoftmaxCrossEntropyWithLogits", name, new ExecuteOpArgs(features, labels)); - - return (results[0], results[1]); - } - - /// - /// Computes rectified linear: `max(features, 0)`. - /// - /// A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `features`. - public static Tensor relu(Tensor features, string name = null) - => tf.Context.ExecuteOp("Relu", name, new ExecuteOpArgs(features)); - - public static Tensor tanh(Tensor x, string name = null) - => tf.Context.ExecuteOp("Tanh", name, new ExecuteOpArgs(x)); - } -} diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs index 3ccf0c190..76a222ba3 100644 --- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs +++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs @@ -103,6 +103,11 @@ public Operation _apply_op_helper(string op_type_name, string name = null, Dicti DataType dtype = DataType.DtInvalid; DataType default_dtype = DataType.DtInvalid; + if (values is Tensors tensors) + { + values = (Tensor[])tensors; + } + if (_IsListParameter(input_arg)) { if (!_IsListValue(values)) diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index 311f2184f..a789c5f4b 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -187,6 +187,33 @@ public void run(FeedItem[] feed_dict = null, Session session = null) public virtual T get_attr(string name) => (T)get_attr(name); + internal unsafe TF_DataType _get_attr_type(string name) + { + Status status = new(); + TF_DataType result; + c_api.TF_OperationGetAttrType(_handle, name, new IntPtr(&result), status); + status.Check(true); + return result; + } + + internal unsafe int _get_attr_int(string name) + { + Status status = new(); + int result; + c_api.TF_OperationGetAttrInt(_handle, name, new IntPtr(&result), status); + status.Check(true); + return result; + } + + internal unsafe bool _get_attr_bool(string name) + { + Status status = new(); + bool result; + c_api.TF_OperationGetAttrBool(_handle, name, new IntPtr(&result), status); + status.Check(true); + return result; + } + public virtual T[] get_attr_list(string name) { if (tf.executing_eagerly()) @@ -229,7 +256,42 @@ public virtual object get_attr(string name) if(oneof_value == AttrValue.ValueOneofCase.List) { - throw new NotImplementedException($"Unsupported field type in {oneof_value}"); + if (x.List.S is not null && x.List.S.Count > 0) + { + return x.List.S.Select(x => x.ToStringUtf8()).ToArray(); + } + else if (x.List.I is not null && x.List.I.Count > 0) + { + return x.List.I.ToArray(); + } + else if (x.List.F is not null && x.List.F.Count > 0) + { + return x.List.F.ToArray(); + } + else if (x.List.B is not null && x.List.B.Count > 0) + { + return x.List.B.ToArray(); + } + else if (x.List.Shape is not null && x.List.Shape.Count > 0) + { + return x.List.Shape.ToArray(); + } + else if (x.List.Tensor is not null && x.List.Tensor.Count > 0) + { + return x.List.Tensor.ToArray(); + } + else if (x.List.Func is not null && x.List.Func.Count > 0) + { + return 
x.List.Func.ToArray(); + } + else if (x.List.Type is not null && x.List.Type.Count > 0) + { + return x.List.Type.Select(x => x.as_tf_dtype()).ToArray(); + } + else + { + return null; + } } if(oneof_value == AttrValue.ValueOneofCase.Type) { diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index 2767e8219..a0b47aace 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -22,12 +22,13 @@ limitations under the License. using Tensorflow.Eager; using Tensorflow.Framework; using static Tensorflow.Binding; +using System.Diagnostics; namespace Tensorflow { public class array_ops { - public static Tensor placeholder_with_default(T input, int[] shape, string name = null) + public static Tensor placeholder_with_default(Tensor input, int[] shape, string name = null) => gen_array_ops.placeholder_with_default(input, shape, name); /// @@ -132,7 +133,7 @@ public static Tensor boolean_mask(T1 tensor, T2 mask, string name = "boo if (ndims_mask < 1) throw new ValueError("mask cannot be scalar."); - var leading_size = gen_math_ops.prod(shape(tensor_tensor)[$"{axis}:{axis + ndims_mask}"], new[] { 0 }); + var leading_size = gen_math_ops.prod(shape(tensor_tensor)[$"{axis}:{axis + ndims_mask}"], ops.convert_to_tensor(new[] { 0 })); var shape1 = concat(new[] { shape(tensor_tensor)[$":{axis}"], @@ -153,7 +154,7 @@ public static Tensor boolean_mask(T1 tensor, T2 mask, string name = "boo private static Tensor _apply_mask_1d(Tensor reshaped_tensor, Tensor mask, int axis = 0) { var indices = squeeze(where(mask), axis: new[] { 1 }); - return gather(reshaped_tensor, indices, axis: axis); + return gather(reshaped_tensor, indices, axis: ops.convert_to_tensor(axis)); } public static Tensor zeros(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) @@ -293,7 +294,7 @@ public static Tensor _autopacking_helper(IEnumerable list_or_tuple, TF_D } public static Tensor expand_dims(Tensor input, int axis = -1, string name = null) - => gen_array_ops.expand_dims(input, axis, name); + => gen_array_ops.expand_dims(input, ops.convert_to_tensor(axis), name); /// /// Creates a tensor filled with a scalar value. @@ -304,7 +305,7 @@ public static Tensor expand_dims(Tensor input, int axis = -1, string name = null /// Optional string. The name of the output `tf.Tensor`. /// A `tf.Tensor` with shape `dims` and the same dtype as `value`. public static Tensor fill(Shape dims, T value, string name = null) - => gen_array_ops.fill(dims, value, name: name); + => gen_array_ops.fill(dims, ops.convert_to_tensor(value), name: name); /// /// Returns the rank of a tensor. 
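The hunk above is representative of this refactor: the regenerated gen_array_ops wrappers accept Tensor arguments only, so the convenience layer in array_ops now wraps plain scalars and arrays with ops.convert_to_tensor before dispatching, as the expand_dims and fill changes show. A minimal call-site sketch, not part of the patch, with illustrative values and assuming the (input, axis, name) shape used by the new array_ops.expand_dims forwarding:

    using Tensorflow;                 // ops, gen_array_ops
    using static Tensorflow.Binding;  // tf

    // Illustrative sketch only (hypothetical values).
    // The regenerated expand_dims takes the axis as a Tensor, so an int
    // axis is converted before calling the generated wrapper.
    var x = tf.constant(new[] { 1, 2, 3 });             // shape (3,)
    var axis = ops.convert_to_tensor(0);                // int -> Tensor
    var expanded = gen_array_ops.expand_dims(x, axis);  // shape (1, 3)
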
@@ -368,7 +369,7 @@ public static Tensor reshape(Tensor tensor, Shape shape, string name = null) => gen_array_ops.reshape(tensor, shape, name: name); public static Tensor reshape(Tensor tensor, object[] shape, string name = null) - => gen_array_ops.reshape(tensor, shape, name: name); + => gen_array_ops.reshape(tensor, ops.convert_to_tensor(shape), name: name); private static Tensor ones_like_impl(T tensor, TF_DataType dtype, string name, bool optimize = true) { @@ -466,7 +467,11 @@ public static Tensor one_hot(Tensor indices, Tensor depth, } public static (Tensor, Tensor) unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string name = null) - => gen_array_ops.unique(x, out_idx: out_idx, name: name); + { + var res = gen_array_ops.unique(x, out_idx: out_idx, name: name); + Debug.Assert(res.Length == 2); + return (res[0], res[1]); + } public static Tensor stack(Tensor[] values, int axis = 0, string name = "stack") { @@ -492,12 +497,12 @@ public static Tensor where(Tensor condition, object x = null, object y = null, s { name = scope; condition = ops.convert_to_tensor(condition, preferred_dtype: dtypes.@bool, name: "condition"); - return gen_array_ops.where(condition: condition, name: name); + return gen_array_ops.where(condition, name: name); }); } else if (x != null && y != null) { - return gen_array_ops.select(condition, x, y, name); + return gen_math_ops.select(condition, ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); } else { @@ -505,7 +510,6 @@ public static Tensor where(Tensor condition, object x = null, object y = null, s } } - public static Tensor where_v2(Tensor condition, object x = null, object y = null, string name = null) { if (x == null && y == null) @@ -514,18 +518,19 @@ public static Tensor where_v2(Tensor condition, object x = null, object y = null { name = scope; condition = ops.convert_to_tensor(condition, preferred_dtype: dtypes.@bool, name: "condition"); - return gen_array_ops.where(condition: condition, name: name); + return gen_array_ops.where(condition, name: name); }); } else if (x != null && y != null) { - return gen_array_ops.select_v2(condition, x, y, name); + return gen_math_ops.select_v2(condition, ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); } else { throw new ValueError("x and y must both be non-None or both be None."); } } + /// /// Returns the shape of a tensor. /// @@ -634,7 +639,13 @@ public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.D /// /// public static Tensor stop_gradient(Tensor input, string name = null) - => tf.Context.ExecuteOp("StopGradient", name, new ExecuteOpArgs(input)); + { + var tape = tf.GradientTape().stop_recording(); + var result = gen_array_ops.stop_gradient(input, name); + tape.StartRecord(); + tf.GradientTape().PushTape(tape); + return result; + } /// /// Extracts a strided slice of a tensor (generalized python array indexing). 
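Two behavioural details in the hunk above are easy to miss: array_ops.unique now unpacks the Tensor[] returned by the regenerated op into a (values, indices) tuple, and where / where_v2 route through gen_math_ops.select / select_v2 with operands converted via ops.convert_to_tensor. A short usage sketch of the tuple form, not part of the patch, with hypothetical input values:

    using Tensorflow;
    using static Tensorflow.Binding;

    // Illustrative sketch only.
    // unique() returns the distinct values plus, for every element of x,
    // the index of that element's value in the first output.
    var x = tf.constant(new[] { 1, 1, 2, 4, 4, 4, 7 });
    var (values, indices) = array_ops.unique(x);
    // values  -> [1, 2, 4, 7]
    // indices -> [0, 0, 1, 2, 2, 2, 3]
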
@@ -858,7 +869,7 @@ public static Tensor concat(Tensor[] values, int axis, string name = "concat") }); } - return gen_array_ops.concat_v2(values, axis, name: name); + return gen_array_ops.concat_v2(values, ops.convert_to_tensor(axis), name: name); } public static Tensor concat(Tensor[] values, Tensor axis, string name = "concat") @@ -868,7 +879,7 @@ public static Tensor concat(Tensor[] values, Tensor axis, string name = "concat" public static Tensor concat(object[] values, int axis, string name = "concat") { - return gen_array_ops.concat_v2(values, axis, name: name); + return tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis)); } /// @@ -886,18 +897,33 @@ public static Tensor concat(object[] values, int axis, string name = "concat") /// /// An integer. The number of batch dimensions. Must be less than or equal to rank(indices). /// - public static Tensor gather(T1 @params, T2 indices, string name = null, int axis = 0, int batch_dims = 0) + public static Tensor gather(Tensor @params, Tensor indices, string name = null, Tensor axis = null, int batch_dims = 0) { - if (axis != 0) - return gen_array_ops.gather_v2(@params, indices, axis, name: name); - - if (@params is ResourceVariable variable && - indices is Tensor indices_tensor) - return variable.sparse_read(indices_tensor, name); + if (axis is null) + axis = tf.convert_to_tensor(batch_dims); + if(tensor_util.constant_value(axis) != 0) + { + return gen_array_ops.gather_v2(@params, indices, axis, batch_dims: batch_dims, name: name); + } return gen_array_ops.gather_v2(@params, indices, axis, name: name); } + public static Tensor gather(Tensor @params, Tensor indices, int axis, string name = null, int batch_dims = 0) + => gather(@params, indices, name, ops.convert_to_tensor(axis), batch_dims); + + public static Tensor gather(ResourceVariable @params, Tensor indices, string name = null, Tensor axis = null, int batch_dims = 0) + { + if (axis is null) + axis = tf.convert_to_tensor(batch_dims); + if (tensor_util.constant_value(axis) != 0) + { + throw new NotImplementedException(); + } + + return @params.sparse_read(indices, name); + } + public static Tensor transpose(T1 a, Axis perm, string name = "transpose", bool conjugate = false) { return tf_with(ops.name_scope(name, "transpose", new { a }), scope => @@ -927,7 +953,7 @@ public static Tensor[] split(Tensor value, Tensor size_splits, int axis, int num if (num == -1) num = (int)size_splits.shape[0]; - return gen_array_ops.split_v(value, size_splits, axis, num, name: name); + return gen_array_ops.split_v(value, size_splits, tf.convert_to_tensor(axis), num, name: name); } public static Tensor[] split(Tensor value, int num_split, T axis, @@ -956,20 +982,10 @@ private static Tensor[] split_eager_fallback(Ta axis, Tv value, int num_ } public static Tensor slice(Tensor input, Tensor[] begin, Tensor[] size, string name = null) - => gen_array_ops.slice(input, begin, size, name: name); - - public static Tensor slice(Tensor input, Tb begin, Ts size, string name = null) - => gen_array_ops.slice(input, begin, size, name: name); + => gen_array_ops.slice(input, ops.convert_to_tensor(begin), ops.convert_to_tensor(size), name: name); public static Tensor slice(Tensor input, Tensor begin, Tensor size, string name = null) - => tf.Context.ExecuteOp("Slice", name, new ExecuteOpArgs(input, begin, size) - { - GetGradientAttrs = (op) => new - { - T = op.get_attr("T"), - Index = op.get_attr("Index") - } - }); + => gen_array_ops.slice(input, begin, size, name: name); public static Tensor stack(object 
values, int axis = 0, string name = "stack") diff --git a/src/TensorFlowNET.Core/Operations/dataset_ops.cs b/src/TensorFlowNET.Core/Operations/dataset_ops.cs index c7e627772..061fb95e3 100644 --- a/src/TensorFlowNET.Core/Operations/dataset_ops.cs +++ b/src/TensorFlowNET.Core/Operations/dataset_ops.cs @@ -233,7 +233,7 @@ public Tensor anonymous_iterator_v3(TF_DataType[] output_types, Shape[] output_s { try { - var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("AnonymousIteratorV3", name) + var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "AnonymousIteratorV3", name) { attrs = attrs }); @@ -250,7 +250,7 @@ public Tensor anonymous_iterator_v3(TF_DataType[] output_types, Shape[] output_s public Tensor anonymous_iterator_v3_eager_fallback(TF_DataType[] output_types, Shape[] output_shapes, string name, Context ctx) { object[] attrs = new object[] { output_types, output_shapes }; - var result = execute.quick_execute("AnonymousIteratorV3", 1, new Tensor[] { }, attrs, ctx, name); + var result = _execute.quick_execute("AnonymousIteratorV3", 1, new Tensor[] { }, attrs, ctx, name); return result[0]; } diff --git a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs index 1dc6504ab..9810d32f3 100644 --- a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs @@ -1,543 +1,10327 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. +/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/ - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using System; -using System.Linq; -using Tensorflow.Contexts; using Tensorflow.Eager; +using Tensorflow.Contexts; using static Tensorflow.Binding; -namespace Tensorflow +namespace Tensorflow; + +public static class gen_array_ops { - public static class gen_array_ops + /// + /// + /// + /// + /// + /// + /// + public static Tensor batch_matrix_band_part(Tensor input, Tensor num_lower, Tensor num_upper, string? 
name = null) { - public static Tensor batch_to_space_nd(T input, int[] block_shape, int[,] crops, string name = null) + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - var _op = tf.OpDefLib._apply_op_helper("BatchToSpaceND", name: name, args: new { input, block_shape, crops }); - - return _op.output; + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixBandPart", name) { args = new object[] { input, num_lower, num_upper }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_matrix_band_part_eager_fallback(input, num_lower, num_upper, name: name, ctx: _ctx); + } + catch (Exception) + { + } } - - public static Tensor check_numerics(Tensor tensor, string message, string name = null) + Dictionary keywords = new(); + keywords["input"] = input; + keywords["num_lower"] = num_lower; + keywords["num_upper"] = num_upper; + var _op = tf.OpDefLib._apply_op_helper("BatchMatrixBandPart", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("CheckNumerics", name: name, args: new { tensor, message }); - - return _op.output; + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BatchMatrixBandPart", _op.inputs, _attrs, _result); } + return _result[0]; + } - /// - /// Concatenates tensors along one dimension. - /// - /// - /// - /// - /// - public static Tensor concat_v2(T[] values, Ta axis, string name = null) - => tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis)); - - public static Tensor concat_v2(Tensor[] values, Tensor axis, string name = null) + public static Tensor batch_matrix_band_part_eager_fallback(Tensor input, Tensor num_lower, Tensor num_upper, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, num_lower, num_upper }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("BatchMatrixBandPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchMatrixBandPart", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + public static Tensor batch_matrix_diag(Tensor diagonal, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - if (tf.Context.executing_eagerly()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixDiag", name) { args = new object[] { diagonal }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_matrix_diag_eager_fallback(diagonal, name: name, ctx: _ctx); + } + catch (Exception) { - return concat_v2_eager_fallback(values, axis, name, tf.Context); } - - var _op = tf.OpDefLib._apply_op_helper("ConcatV2", name: name, args: new { values, axis }); - return _op.output; } - - public static Tensor concat_v2(Tensor[] values, int axis, string name = null) - => tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis)); - - private static Tensor concat_v2_eager_fallback(T1[] values, T2 axis, string name, Context ctx) + Dictionary keywords = new(); + keywords["diagonal"] = diagonal; + var _op = tf.OpDefLib._apply_op_helper("BatchMatrixDiag", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _attr_N = len(values); - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: values.Select(x => (object)x).ToArray()); - var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new object[] { axis }); - var _inputs_flat = input.concat(axis1); - var _attrs = new object[] { "N", _attr_N, "T", _attr_T, "Tidx", _attr_Tidx }; - - return tf.Runner.Execute(ctx, "ConcatV2", 1, _inputs_flat, _attrs, name: name)[0]; + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BatchMatrixDiag", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor[] concat_offset(Tensor concat_dim, Tensor[] shape, string name = null) + public static Tensor batch_matrix_diag_eager_fallback(Tensor diagonal, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { diagonal }; + object[] _attrs = new object[] { "T", diagonal.dtype }; + var _result = _execute.execute("BatchMatrixDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("ConcatOffset", name: name, args: new { concat_dim, shape }); - - return _op.outputs; + _execute.record_gradient("BatchMatrixDiag", _inputs_flat, _attrs, _result); } - - /// - /// Returns a diagonal tensor with a given diagonal values. - /// - /// - /// Rank k tensor where k is at most 1. - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Diag'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// Given a diagonal, this operation returns a tensor with the diagonal and - /// everything else padded with zeros. The diagonal is computed as follows: - /// - /// Assume diagonal has dimensions [D1,..., Dk], then the output is a tensor of - /// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: - /// - /// output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik] and 0 everywhere else. 
- /// - /// For example: - /// - /// - /// # 'diagonal' is [1, 2, 3, 4] - /// tf.diag(diagonal) ==&gt; [[1, 0, 0, 0] - /// [0, 2, 0, 0] - /// [0, 0, 3, 0] - /// [0, 0, 0, 4]] - /// - /// - public static Tensor diag(Tensor diagonal, string name = null) - => tf.Context.ExecuteOp("Diag", name, new ExecuteOpArgs(diagonal)); - - public static Tensor diag_part(Tensor diagonal, string name = null) - => tf.Context.ExecuteOp("DiagPart", name, new ExecuteOpArgs(diagonal)); - - public static Tensor expand_dims(Tensor input, int axis, string name = null) - => tf.Context.ExecuteOp("ExpandDims", name, new ExecuteOpArgs(input, axis) - .SetAttributes(new { dim = axis })); - - public static Tensor gather_v2(T1 @params, T2 indices, int axis, int batch_dims = 0, string name = null) + return _result[0]; + } + /// + /// + /// + /// + /// + public static Tensor batch_matrix_diag_part(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixDiagPart", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_matrix_diag_part_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("BatchMatrixDiagPart", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var result = tf.Context.ExecuteOp("GatherV2", name, new ExecuteOpArgs( - @params, - indices, - axis).SetAttributes(new { batch_dims })); - return result [0]; + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BatchMatrixDiagPart", _op.inputs, _attrs, _result); } + return _result[0]; + } - private static Tensor gather_v2_eager_fallback(object @params, object indices, int axis, string name, Context ctx) + public static Tensor batch_matrix_diag_part_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("BatchMatrixDiagPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - var (_attr_T, param) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { @params }); - var (_attr_Tindice, indice) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new[] { indices }); - var (_attr_Taxis, axiss) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new object[] { axis }); - var _inputs_flat = param.concat(indice).concat(axiss); - var _attrs = new object[] { "batch_dims", 0, "Tparams", _attr_T, "Tindices", _attr_Tindice, "Taxis", _attr_Taxis }; - - var results = tf.Runner.Execute(ctx, "GatherV2", 1, _inputs_flat, _attrs, name: name); - if (tf.Runner.MustRecordGradient()) - tf.Runner.RecordGradient("GatherV2", _inputs_flat, _attrs, results); - return results[0]; + _execute.record_gradient("BatchMatrixDiagPart", _inputs_flat, _attrs, _result); } - - - public static Tensor pad(Tensor input, Tensor paddings, string name = null) + return _result[0]; + } + /// + /// + /// + /// + /// + /// + public static Tensor batch_matrix_set_diag(Tensor input, Tensor diagonal, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - if (tf.Context.executing_eagerly()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixSetDiag", name) { args = new object[] { input, diagonal }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_matrix_set_diag_eager_fallback(input, diagonal, name: name, ctx: _ctx); + } + catch (Exception) { - /*var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, - "Pad", name, - null, - input, paddings); - return results[0];*/ - return pad_eager_fallback(input, paddings, name: name, ctx: tf.Context); } - - var _op = tf.OpDefLib._apply_op_helper("Pad", name: name, args: new { input, paddings }); - - return _op.output; } - - private static Tensor pad_eager_fallback(Tensor inputs, Tensor padding, string name = null, Context ctx = null) + Dictionary keywords = new(); + keywords["input"] = input; + keywords["diagonal"] = diagonal; + var _op = tf.OpDefLib._apply_op_helper("BatchMatrixSetDiag", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs }); - var (_attr_Tpaddings, paddings) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new[] { padding }); - var _inputs_flat = input.concat(paddings); - var _attrs = new object[] { "T", _attr_T, "Tpaddings", _attr_Tpaddings }; - - var results = tf.Runner.Execute(ctx, "Pad", 1, _inputs_flat, _attrs, name: name); - if (tf.Runner.MustRecordGradient()) - tf.Runner.RecordGradient("Pad", _inputs_flat, _attrs, results); - return results[0]; + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BatchMatrixSetDiag", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor pack(Tensor[] values, int axis = 0, string name = null) - => tf.Context.ExecuteOp("Pack", name, new ExecuteOpArgs() + public static Tensor batch_matrix_set_diag_eager_fallback(Tensor input, Tensor diagonal, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, diagonal }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("BatchMatrixSetDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchMatrixSetDiag", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// BatchToSpace for 4-D tensors of type T. + /// + /// + /// + /// This is a legacy version of the more general BatchToSpaceND. + /// + /// Rearranges (permutes) data from batch into blocks of spatial data, followed by + /// cropping. This is the reverse transformation of SpaceToBatch. More specifically, + /// this op outputs a copy of the input tensor where values from the `batch` + /// dimension are moved in spatial blocks to the `height` and `width` dimensions, + /// followed by cropping along the `height` and `width` dimensions. + /// + /// + /// + /// + /// + /// + public static Tensor batch_to_space(Tensor input, Tensor crops, int block_size = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try { - OpInputArgs = new object[] { values } - }.SetAttributes(new { axis })); - - /// - /// Return a tensor with the same shape and contents as the input tensor or value. 
- /// - /// - /// - public static Tensor identity(Tensor input, string name = null) - => tf.Context.ExecuteOp("Identity", name, new ExecuteOpArgs(input)); - - public static Tensor invert_permutation(Tensor x, string name = null) + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchToSpace", name) { args = new object[] { input, crops }, attrs = new Dictionary() { ["block_size"] = block_size } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_to_space_eager_fallback(input, crops, block_size: block_size, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["crops"] = crops; + keywords["block_size"] = block_size; + var _op = tf.OpDefLib._apply_op_helper("BatchToSpace", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("InvertPermutation", name, new { x }); - - return _op.outputs[0]; + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "block_size", _op._get_attr_int("block_size"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("BatchToSpace", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor log(Tensor x, string name = null) - => tf.Context.ExecuteOp("Log", name, new ExecuteOpArgs(x)); - - - public static Tensor rank(Tensor input, string name = null) - => tf.Context.ExecuteOp("Rank", name, new ExecuteOpArgs(input)); - - /// - /// Creates a tensor filled with a scalar value. - /// - /// A `Tensor`. - /// A `Tensor`. 0-D (scalar). Value to fill the returned tensor. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `value`. - public static Tensor fill(Tensor dims, T value, string name = null) + public static Tensor batch_to_space_eager_fallback(Tensor input, Tensor crops, int block_size, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, crops }; + object[] _attrs = new object[] { "T", input.dtype, "block_size", block_size, "Tidx", crops.dtype }; + var _result = _execute.execute("BatchToSpace", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchToSpace", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// BatchToSpace for N-D tensors of type T. + /// + /// + /// + /// This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape + /// `block_shape + [batch]`, interleaves these blocks back into the grid defined by + /// the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as + /// the input. The spatial dimensions of this intermediate result are then + /// optionally cropped according to `crops` to produce the output. This is the + /// reverse of SpaceToBatch. See below for a precise description. + /// + /// + /// + /// + /// + /// + public static Tensor batch_to_space_nd(Tensor input, Tensor block_shape, Tensor crops, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - var ctx = tf.Context; - if (ctx.executing_eagerly()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchToSpaceND", name) { args = new object[] { input, block_shape, crops }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) { - try - { - var _result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("Fill", name, dims, value)); - return _result[0]; - } - catch (Exception) - { - - } - try - { - return fill_eager_fallback(dims, value as Tensor, name, ctx); - } - catch (Exception) - { - - } } - Dictionary attrs = new Dictionary(); - attrs["dims"] = dims; - attrs["value"] = value; - var result = tf.OpDefLib._apply_op_helper("Fill", name, attrs); - if (execute.must_record_gradient()) + try + { + return batch_to_space_nd_eager_fallback(input, block_shape, crops, name: name, ctx: _ctx); + } + catch (Exception) { - throw new NotImplementedException(); } - return result.output; } - - public static Tensor fill_eager_fallback(Tensor dims, Tensor value, string name, Context ctx) + Dictionary keywords = new(); + keywords["input"] = input; + keywords["block_shape"] = block_shape; + keywords["crops"] = crops; + var _op = tf.OpDefLib._apply_op_helper("BatchToSpaceND", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - object[] attrs = new object[] { "T", dims.dtype.as_datatype_enum(), "index_type", dims.dtype.as_datatype_enum() }; - var _result = execute.executes("Fill", 1, new Tensor[] { dims, value }, attrs, ctx, name); + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tblock_shape", _op._get_attr_type("Tblock_shape"), "Tcrops", _op._get_attr_type("Tcrops") }; + _execute.record_gradient("BatchToSpaceND", _op.inputs, _attrs, _result); + } + return _result[0]; + } - if (execute.must_record_gradient()) + public static Tensor batch_to_space_nd_eager_fallback(Tensor input, Tensor block_shape, Tensor crops, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, block_shape, crops }; + object[] _attrs = new object[] { "T", input.dtype, "Tblock_shape", block_shape.dtype, "Tcrops", crops.dtype }; + var _result = _execute.execute("BatchToSpaceND", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchToSpaceND", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Bitcasts a tensor from one type to another without copying data. + /// + /// + /// + /// Given a tensor `input`, this operation returns a tensor that has the same buffer + /// data as `input` with datatype `type`. + /// + /// If the input datatype `T` is larger than the output datatype `type` then the + /// shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. + /// + /// If `T` is smaller than `type`, the operator requires that the rightmost + /// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from + /// [..., sizeof(`type`)/sizeof(`T`)] to [...]. + /// + /// tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype + /// (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() + /// gives module error. + /// For example, + /// + /// Example 1: + /// + /// >>> a = [1., 2., 3.] + /// >>> equality_bitcast = tf.bitcast(a, tf.complex128) + /// Traceback (most recent call last): + /// ... 
+ /// InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] + /// >>> equality_cast = tf.cast(a, tf.complex128) + /// >>> print(equality_cast) + /// tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) + /// + /// Example 2: + /// + /// >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) + /// + /// + /// Example 3: + /// + /// >>> x = [1., 2., 3.] + /// >>> y = [0., 2., 3.] + /// >>> equality= tf.equal(x,y) + /// >>> equality_cast = tf.cast(equality,tf.float32) + /// >>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8) + /// >>> print(equality) + /// tf.Tensor([False True True], shape=(3,), dtype=bool) + /// >>> print(equality_cast) + /// tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) + /// >>> print(equality_bitcast) + /// tf.Tensor( + /// [[ 0 0 0 0] + /// [ 0 0 128 63] + /// [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + /// + /// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different + /// endian orderings will give different results. + /// + /// + /// + /// + /// + public static Tensor bitcast(Tensor input, TF_DataType type, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bitcast", name) { args = new object[] { input }, attrs = new Dictionary() { ["type"] = type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return bitcast_eager_fallback(input, type: type, name: name, ctx: _ctx); + } + catch (Exception) { - throw new NotImplementedException(); } - return _result[0]; } - //=> tf.Context.ExecuteOp("Fill", name, new ExecuteOpArgs(dims, value)); - - /// - /// Return the reduction indices for computing gradients of s0 op s1 with broadcast. - /// - /// A `Tensor`. Must be one of the following types: `int32`, `int64`. - /// A `Tensor`. Must have the same type as `s0`. - /// A name for the operation (optional). - /// A tuple of `Tensor` objects (r0, r1). 
- public static (Tensor, Tensor) broadcast_gradient_args(Tensor s0, Tensor s1, string name = "") + Dictionary keywords = new(); + keywords["input"] = input; + keywords["type"] = type; + var _op = tf.OpDefLib._apply_op_helper("Bitcast", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var results = tf.Context.ExecuteOp("BroadcastGradientArgs", name, new ExecuteOpArgs(s0, s1)); - return (results[0], results[1]); + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "type", _op._get_attr_type("type") }; + _execute.record_gradient("Bitcast", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor reverse(Tensor tensor, T axis, string name = null) + public static Tensor bitcast_eager_fallback(Tensor input, TF_DataType type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "type", type }; + var _result = _execute.execute("Bitcast", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("ReverseV2", name, new { tensor, axis }); - return _op.output; + _execute.record_gradient("Bitcast", _inputs_flat, _attrs, _result); } - - public static Tensor reshape(Tensor tensor, T shape, string name = null) - => tf.Context.ExecuteOp("Reshape", name, new ExecuteOpArgs(tensor, shape)); - - public static Tensor reshape(Tensor tensor, object[] shape, string name = null) - => tf.Context.ExecuteOp("Reshape", name, new ExecuteOpArgs(tensor, shape)); - - private static Tensor reshape_eager_fallback(Tensor tensor, object[] shape, string name, Context ctx) + return _result[0]; + } + /// + /// Return the shape of s0 op s1 with broadcast. + /// + /// + /// + /// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the + /// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. + /// + /// + /// + /// + /// + public static Tensor broadcast_args(Tensor s0, Tensor s1, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - var (_attr_T, _input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { tensor }); - var (_attr_Tshape, _input_shape) = tf.Runner.ArgsToMatchingEager(ctx, args: new object[] { shape }, default_dtype: TF_DataType.TF_INT32); - var _inputs_flat = new[] { _input[0], _input_shape[0] }; - var _attrs = new object[] { "T", _attr_T, "Tshape", _attr_Tshape }; - - var results = tf.Runner.Execute(ctx, "Reshape", 1, _inputs_flat, _attrs, name: name); - if (tf.Runner.MustRecordGradient()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastArgs", name) { args = new object[] { s0, s1 }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return broadcast_args_eager_fallback(s0, s1, name: name, ctx: _ctx); + } + catch (Exception) { - tf.Runner.RecordGradient("Reshape", _inputs_flat, _attrs, results); } - return results[0]; } - - /// - /// Finds unique elements in a 1-D tensor. 
- /// - /// - /// - /// - /// - public static (Tensor, Tensor) unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string name = null) + Dictionary keywords = new(); + keywords["s0"] = s0; + keywords["s1"] = s1; + var _op = tf.OpDefLib._apply_op_helper("BroadcastArgs", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("Unique", name, new { x, out_idx }); - // TODO - //var _result = _UniqueOutput._make(_op.outputs); - return (_op.outputs[0], _op.outputs[1]); + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BroadcastArgs", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor[] unpack(Tensor value, int num, int axis = 0, string name = null) - => tf.Context.ExecuteOp("Unpack", name, new ExecuteOpArgs(value, num) - .SetAttributes(new { axis, num })); - - public static Tensor where(Tensor condition, string name = null) + public static Tensor broadcast_args_eager_fallback(Tensor s0, Tensor s1, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { s0, s1 }; + object[] _attrs = new object[] { "T", s0.dtype }; + var _result = _execute.execute("BroadcastArgs", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("Where", name, new { input = condition }); - return _op.output; + _execute.record_gradient("BroadcastArgs", _inputs_flat, _attrs, _result); } - - public static Tensor one_hot(Tensor indices, Tensor depth, - Tensor on_value = null, - Tensor off_value = null, - TF_DataType dtype = TF_DataType.DtInvalid, - int axis = -1, - string name = null) - => tf.Context.ExecuteOp("OneHot", name, new ExecuteOpArgs(indices, depth, on_value, off_value) - .SetAttributes(new { axis })); - - /// - /// A placeholder op that passes through `input` when its output is not fed. - /// - /// The default value to produce when output is not fed. - /// - /// - /// - public static Tensor placeholder_with_default(T input, int[] shape, string name = null) + return _result[0]; + } + /// + /// Return the reduction indices for computing gradients of s0 op s1 with broadcast. + /// + /// + /// + /// This is typically used by gradient computations for a broadcasting operation. + /// + /// + /// + /// + /// + public static Tensor[] broadcast_gradient_args(Tensor s0, Tensor s1, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastGradientArgs", name) { args = new object[] { s0, s1 }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return broadcast_gradient_args_eager_fallback(s0, s1, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["s0"] = s0; + keywords["s1"] = s1; + var _op = tf.OpDefLib._apply_op_helper("BroadcastGradientArgs", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("PlaceholderWithDefault", name, new { input, shape, name }); - return _op.outputs[0]; + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BroadcastGradientArgs", _op.inputs, _attrs, _result); } + return _result; + } - public static Tensor select(Tensor condition, Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Select", name, new ExecuteOpArgs(condition, x, y)); - - public static Tensor select_v2(Tensor condition, Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("SelectV2", name, new ExecuteOpArgs(condition, x, y)); - - public static Tensor scatter_nd(Tensor indices, Tensor updates, Tensor[] shape, string name = null) + public static Tensor[] broadcast_gradient_args_eager_fallback(Tensor s0, Tensor s1, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { s0, s1 }; + object[] _attrs = new object[] { "T", s0.dtype }; + var _result = _execute.execute("BroadcastGradientArgs", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("ScatterNd", name, new { indices, updates, shape }); - return _op.outputs[0]; + _execute.record_gradient("BroadcastGradientArgs", _inputs_flat, _attrs, _result); } - - public static Tensor shape(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) - => tf.Context.ExecuteOp("Shape", name, new ExecuteOpArgs(input) - .SetAttributes(new { out_type })); - - /// - /// Returns shape of tensors. - /// - /// - /// - /// - /// - public static Tensor[] shape_n(Tensor[] input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) - => tf.Context.ExecuteOp("ShapeN", name, new ExecuteOpArgs() + return _result; + } + /// + /// Broadcast an array for a compatible shape. + /// + /// + /// + /// Broadcasting is the process of making arrays to have compatible shapes + /// for arithmetic operations. Two shapes are compatible if for each + /// dimension pair they are either equal or one of them is one. + /// + /// For example: + /// + /// >>> x = tf.constant([[1, 2, 3]]) # Shape (1, 3,) + /// >>> y = tf.broadcast_to(x, [2, 3]) + /// >>> print(y) + /// tf.Tensor( + /// [[1 2 3] + /// [1 2 3]], shape=(2, 3), dtype=int32) + /// + /// In the above example, the input Tensor with the shape of `[1, 3]` + /// is broadcasted to output Tensor with shape of `[2, 3]`. + /// + /// When broadcasting, if a tensor has fewer axes than necessary its shape is + /// padded on the left with ones. 
So this gives the same result as the previous + /// example: + /// + /// >>> x = tf.constant([1, 2, 3]) # Shape (3,) + /// >>> y = tf.broadcast_to(x, [2, 3]) + /// + /// + /// When doing broadcasted operations such as multiplying a tensor + /// by a scalar, broadcasting (usually) confers some time or space + /// benefit, as the broadcasted tensor is never materialized. + /// + /// However, `broadcast_to` does not carry with it any such benefits. + /// The newly-created tensor takes the full memory of the broadcasted + /// shape. (In a graph context, `broadcast_to` might be fused to + /// subsequent operation and then be optimized away, however.) + /// + /// + /// + /// + /// + public static Tensor broadcast_to(Tensor input, Tensor shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try { - OpInputArgs = new object[] { input } - }.SetAttributes(new { out_type })); - - public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastTo", name) { args = new object[] { input, shape }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return broadcast_to_eager_fallback(input, shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("BroadcastTo", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("Size", name, new { input, out_type }); - return _op.outputs[0]; + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("BroadcastTo", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor slice(Tensor input, Tensor[] begin, Tensor[] size, string name = null) + public static Tensor broadcast_to_eager_fallback(Tensor input, Tensor shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, shape }; + object[] _attrs = new object[] { "T", input.dtype, "Tidx", shape.dtype }; + var _result = _execute.execute("BroadcastTo", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BroadcastTo", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Checks a tensor for NaN and Inf values. + /// + /// + /// + /// When run, reports an `InvalidArgument` error if `tensor` has any values + /// that are not a number (NaN) or infinity (Inf). Otherwise, returns the input + /// tensor. + /// + /// Example usage: + /// + /// ``` python + /// a = tf.Variable(1.0) + /// tf.debugging.check_numerics(a, message='') + /// + /// b = tf.Variable(np.nan) + /// try: + /// tf.debugging.check_numerics(b, message='Checking b') + /// except Exception as e: + /// assert "Checking b : Tensor had NaN values" in e.message + /// + /// c = tf.Variable(np.inf) + /// try: + /// tf.debugging.check_numerics(c, message='Checking c') + /// except Exception as e: + /// assert "Checking c : Tensor had Inf values" in e.message + /// ``` + /// + /// + /// + /// + /// + /// + /// Prefix of the error message. + /// + /// + /// + public static Tensor check_numerics(Tensor tensor, string message, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - if (tf.executing_eagerly()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CheckNumerics", name) { args = new object[] { tensor }, attrs = new Dictionary() { ["message"] = message } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return check_numerics_eager_fallback(tensor, message: message, name: name, ctx: _ctx); + } + catch (Exception) { - var result = slice_eager_fallback(input, begin, size, name, tf.Context); - return result; } - - var _op = tf.OpDefLib._apply_op_helper("Slice", name, new { input, begin, size }); - return _op.outputs[0]; } - - private static Tensor slice_eager_fallback(Tensor inputs, Tensor[] begin, Tensor[] size, string name, Context ctx) + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["message"] = message; + var _op = tf.OpDefLib._apply_op_helper("CheckNumerics", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs }); - var (_attr_Tidx, _inputs_Index) = tf.Runner.ArgsToMatchingEager(ctx, args: new object[] { begin, size }); - var _inputs_flat = input.concat(_inputs_Index); - var _attrs = new object[] { "T", _attr_T, "Index", _attr_Tidx }; + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "message", _op.get_attr("message") }; + _execute.record_gradient("CheckNumerics", _op.inputs, _attrs, _result); + } + return _result[0]; + } - var results = tf.Runner.Execute(ctx, "Slice", 1, _inputs_flat, _attrs, name: name); - if (tf.Runner.MustRecordGradient()) + public static Tensor check_numerics_eager_fallback(Tensor tensor, string message, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor }; + object[] _attrs = new object[] { "T", tensor.dtype, "message", message }; + var _result = _execute.execute("CheckNumerics", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("CheckNumerics", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Checks a tensor for NaN, -Inf and +Inf values. + /// + /// + /// + /// When run, reports an `InvalidArgument` error if `tensor` has any values + /// that are not a number (NaN) or infinity (Inf). Otherwise, returns the input + /// tensor. Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf + /// in the errors it throws. + /// + /// + /// + /// + /// + /// Prefix of the error message. + /// + /// + /// + public static Tensor check_numerics_v2(Tensor tensor, string message, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CheckNumericsV2", name) { args = new object[] { tensor }, attrs = new Dictionary() { ["message"] = message } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return check_numerics_v2_eager_fallback(tensor, message: message, name: name, ctx: _ctx); + } + catch (Exception) { - tf.Runner.RecordGradient("Slice", _inputs_flat, _attrs, results); } - return results[0]; } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["message"] = message; + var _op = tf.OpDefLib._apply_op_helper("CheckNumericsV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "message", _op.get_attr("message") }; + _execute.record_gradient("CheckNumericsV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor slice(Tensor input, Tb begin, Ts size, string name = null) + public static Tensor check_numerics_v2_eager_fallback(Tensor tensor, string message, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor }; + object[] _attrs = new object[] { "T", tensor.dtype, "message", message }; + var _result = _execute.execute("CheckNumericsV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - if (tf.executing_eagerly()) + _execute.record_gradient("CheckNumericsV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Concatenates tensors along one dimension. + /// + /// + /// + /// + public static Tensor concat(Tensor concat_dim, Tensors values, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Concat", name) { args = new object[] { concat_dim, values }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return concat_eager_fallback(concat_dim, values, name: name, ctx: _ctx); + } + catch (Exception) { - var outputs = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("Slice", name, input, begin, size)); - return outputs[0]; } - - var _op = tf.OpDefLib._apply_op_helper("Slice", name, new { input, begin, size }); - return _op.outputs[0]; } + Dictionary keywords = new(); + keywords["concat_dim"] = concat_dim; + keywords["values"] = values; + var _op = tf.OpDefLib._apply_op_helper("Concat", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("Concat", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor[] split_v(Tensor value, Tensor size_splits, - int axis, int num_split, string name = null) - => tf.Context.ExecuteOp("SplitV", name, new ExecuteOpArgs(value, size_splits, axis) - .SetAttributes(new { num_split })); - - public static Tensor tile(Tensor input, Tensor multiples, string name = null) - => tf.Context.ExecuteOp("Tile", name, new ExecuteOpArgs(input, multiples)); - - public static Tensor tile(Tensor input, object[] multiples, string name = null) - => tf.Context.ExecuteOp("Tile", name, new ExecuteOpArgs(input, multiples)); - - public static Tensor transpose(Tensor x, T1 perm, string name = null) - => tf.Context.ExecuteOp("Transpose", name, new ExecuteOpArgs(x, perm)); - - public static Tensor ones_like(Tensor x, string name = null) - => tf.Context.ExecuteOp("OnesLike", name, new ExecuteOpArgs(x)); - - public static Tensor zeros_like(Tensor x, string name = null) - => tf.Context.ExecuteOp("ZerosLike", name, new ExecuteOpArgs(x)); - - public static Tensor stop_gradient(Tensor x, string name = null) + public static Tensor concat_eager_fallback(Tensor concat_dim, Tensors values, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.Add(concat_dim); + _inputs_flat_list.AddRange(values); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", values.Length, "T", values.dtype }; + var _result = _execute.execute("Concat", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Concat", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes offsets of concat inputs within its output. + /// + /// + /// + /// For example: + /// + /// >>> x = [2, 2, 7] + /// >>> y = [2, 3, 7] + /// >>> z = [2, 9, 7] + /// >>> offsets = concat_offset(1, [x, y, z]) + /// >>> [list(off.numpy()) for off in offsets] + /// [[0, 0, 0], [0, 2, 0], [0, 5, 0]] + /// + /// This is typically used by gradient computations for a concat operation. + /// + /// + /// + /// + /// + public static Tensor[] concat_offset(Tensor concat_dim, Tensors shape, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConcatOffset", name) { args = new object[] { concat_dim, shape }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return concat_offset_eager_fallback(concat_dim, shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["concat_dim"] = concat_dim; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("ConcatOffset", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("StopGradient", name, args: new { input = x, name }); + object[] _attrs = new object[] { "N", _op._get_attr_int("N") }; + _execute.record_gradient("ConcatOffset", _op.inputs, _attrs, _result); + } + return _result; + } - return _op.output; + public static Tensor[] concat_offset_eager_fallback(Tensor concat_dim, Tensors shape, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.Add(concat_dim); + _inputs_flat_list.AddRange(shape); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", shape.Length }; + var _result = _execute.execute("ConcatOffset", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ConcatOffset", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Concatenates tensors along one dimension. + /// + /// + /// + /// + public static Tensor concat_v2(Tensors values, Tensor axis, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConcatV2", name) { args = new object[] { values, axis }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return concat_v2_eager_fallback(values, axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["values"] = values; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("ConcatV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("ConcatV2", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor strided_slice(Tensor input, Tensor begin, Tensor end, Tensor strides, - long begin_mask = 0, - long end_mask = 0, - long ellipsis_mask = 0, - long new_axis_mask = 0, - long shrink_axis_mask = 0, - string name = null) - => tf.Context.ExecuteOp("StridedSlice", name, new ExecuteOpArgs(input, begin, end, strides) - .SetAttributes(new - { - begin_mask, - end_mask, - ellipsis_mask, - new_axis_mask, - shrink_axis_mask - })); - - public static Tensor resource_strided_slice_assign(Tensor input, Tensor begin, Tensor end, Tensor strides, Tensor value, - int begin_mask = 0, - int end_mask = 0, - int ellipsis_mask = 0, - int new_axis_mask = 0, - int shrink_axis_mask = 0, - string name = null) - => tf.Context.ExecuteOp("ResourceStridedSliceAssign", name, new ExecuteOpArgs(input, begin, end, strides, value) - .SetAttributes(new - { - begin_mask, - end_mask, - ellipsis_mask, - new_axis_mask, - shrink_axis_mask 
- })); - - public static Tensor strided_slice(Tensor input, T[] begin, T[] end, T[] strides, - int begin_mask = 0, - int end_mask = 0, - int ellipsis_mask = 0, - int new_axis_mask = 0, - int shrink_axis_mask = 0, - string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("StridedSlice", name, new - { - input, - begin, - end, - strides, - begin_mask, - end_mask, - ellipsis_mask, - new_axis_mask, - shrink_axis_mask - }); - - return _op.outputs[0]; - } - - /// - /// Removes dimensions of size 1 from the shape of a tensor. - /// Given a tensor `input`, this operation returns a tensor of the same type with - /// all dimensions of size 1 removed.If you don't want to remove all size 1 - /// dimensions, you can remove specific size 1 dimensions by specifying - /// `axis`. - /// - /// A `Tensor`. The `input` to squeeze. - /// An optional list of `ints`. Defaults to `[]`. If specified, only squeezes the dimensions listed. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `input`. - public static Tensor squeeze(Tensor input, int[] axis = null, string name = null) - => tf.Context.ExecuteOp("Squeeze", name, new ExecuteOpArgs(input) - .SetAttributes(new { squeeze_dims = axis })); - - /// - /// Return the shape of s0 op s1 with broadcast. - /// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the - /// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. - /// - /// A `Tensor`. Must be one of the following types: `int32`, `int64`. - /// A `Tensor`. Must have the same type as `s0`. - /// A name for the operation (optional). - /// `Tensor`. Has the same type as `s0`. - public static Tensor broadcast_args(Tensor s0, Tensor s1, string name = null) - => tf.Context.ExecuteOp("BroadcastArgs", name, new ExecuteOpArgs(s0, s1)); - - /// - /// Broadcast an array for a compatible shape. - /// - /// - /// - /// - /// - public static Tensor broadcast_to(Tensor input, T shape, string name = null) - => tf.Context.ExecuteOp("BroadcastTo", name, new ExecuteOpArgs(input, shape)); + public static Tensor concat_v2_eager_fallback(Tensors values, Tensor axis, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.AddRange(values); + _inputs_flat_list.Add(axis); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", values.Length, "T", values.dtype, "Tidx", axis.dtype }; + var _result = _execute.execute("ConcatV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ConcatV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Shuffle dimensions of x according to a permutation and conjugate the result. + /// + /// + /// + /// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: + /// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` + /// `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])` + /// + /// + /// + /// + /// + public static Tensor conjugate_transpose(Tensor x, Tensor perm, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConjugateTranspose", name) { args = new object[] { x, perm }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conjugate_transpose_eager_fallback(x, perm, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["perm"] = perm; + var _op = tf.OpDefLib._apply_op_helper("ConjugateTranspose", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tperm", _op._get_attr_type("Tperm") }; + _execute.record_gradient("ConjugateTranspose", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conjugate_transpose_eager_fallback(Tensor x, Tensor perm, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, perm }; + object[] _attrs = new object[] { "T", x.dtype, "Tperm", perm.dtype }; + var _result = _execute.execute("ConjugateTranspose", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ConjugateTranspose", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a constant tensor. + /// + /// + /// + /// Attr `value` is the tensor to return. + /// + /// + /// + /// + public static Tensor _const(TensorProto value, TF_DataType dtype, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Const", name) { args = new object[] { }, attrs = new Dictionary() { ["value"] = value, ["dtype"] = dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return const_eager_fallback(value: value, dtype: dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["dtype"] = dtype; + var _op = tf.OpDefLib._apply_op_helper("Const", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "value", _op.get_attr("value"), "dtype", _op._get_attr_type("dtype") }; + _execute.record_gradient("Const", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor const_eager_fallback(TensorProto value, TF_DataType dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "value", value, "dtype", dtype }; + var _result = _execute.execute("Const", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Const", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Identity op for gradient debugging. + /// + /// + /// + /// This op is hidden from public in Python. It is used by TensorFlow Debugger to + /// register gradient tensors for gradient debugging. + /// This op operates on non-reference-type tensors. + /// + /// + /// + /// + public static Tensor debug_gradient_identity(Tensor input, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DebugGradientIdentity", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return debug_gradient_identity_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("DebugGradientIdentity", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("DebugGradientIdentity", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor debug_gradient_identity_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("DebugGradientIdentity", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DebugGradientIdentity", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Identity op for gradient debugging. + /// + /// + /// + /// This op is hidden from public in Python. It is used by TensorFlow Debugger to + /// register gradient tensors for gradient debugging. + /// This op operates on reference-type tensors. + /// + /// + /// + /// + public static Tensor debug_gradient_ref_identity(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("debug_gradient_ref_identity op does not support eager execution. Arg input is a ref."); + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("DebugGradientRefIdentity", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("DebugGradientRefIdentity", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor debug_gradient_ref_identity_eager_fallback(Tensor input, string name, Context ctx) + { + throw new RuntimeError($"debug_gradient_ref_identity op does not support eager execution. Arg 'input' is a ref."); + } + /// + /// Makes a copy of `x`. + /// + /// + /// + public static Tensor deep_copy(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DeepCopy", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return deep_copy_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("DeepCopy", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("DeepCopy", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor deep_copy_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("DeepCopy", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DeepCopy", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// DepthToSpace for tensors of type T. + /// + /// + /// + /// Rearranges data from depth into blocks of spatial data. + /// This is the reverse transformation of SpaceToDepth. More specifically, + /// this op outputs a copy of the input tensor where values from the `depth` + /// dimension are moved in spatial blocks to the `height` and `width` dimensions. + /// The attr `block_size` indicates the input block size and how the data is moved. + /// + /// * Chunks of data of size `block_size * block_size` from depth are rearranged + /// into non-overlapping blocks of size `block_size x block_size` + /// * The width of the output tensor is `input_depth * block_size`, whereas the + /// height is `input_height * block_size`. + /// * The Y, X coordinates within each block of the output image are determined + /// by the high order component of the input channel index. + /// * The depth of the input tensor must be divisible by + /// `block_size * block_size`. + /// + /// The `data_format` attr specifies the layout of the input and output tensors + /// with the following options: + /// "NHWC": `[ batch, height, width, channels ]` + /// "NCHW": `[ batch, channels, height, width ]` + /// "NCHW_VECT_C": + /// `qint8 [ batch, channels / 4, height, width, 4 ]` + /// + /// It is useful to consider the operation as transforming a 6-D Tensor. + /// e.g. for data_format = NHWC, + /// Each element in the input tensor can be specified via 6 coordinates, + /// ordered by decreasing memory layout significance as: + /// n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates + /// within the input image, bX, bY means coordinates + /// within the output block, oC means output channels). + /// The output would be the input transposed to the following layout: + /// n,iY,bY,iX,bX,oC + /// + /// This operation is useful for resizing the activations between convolutions + /// (but keeping all data), e.g. instead of pooling. It is also useful for training + /// purely convolutional models. 
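+ /// As a C# sketch (a hypothetical call of the generated binding defined below, reusing
+ /// the `[1, 1, 1, 4]` example that follows):
+ ///
+ /// ```
+ /// // x has shape [1, 1, 1, 4]; the result has shape [1, 2, 2, 1].
+ /// var y = depth_to_space(x, block_size: 2, data_format: "NHWC");
+ /// ```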
+ /// + /// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and + /// block_size = 2: + /// + /// ``` + /// x = [[[[1, 2, 3, 4]]]] + /// + /// ``` + /// + /// This operation will output a tensor of shape `[1, 2, 2, 1]`: + /// + /// ``` + /// [[[[1], [2]], + /// [[3], [4]]]] + /// ``` + /// + /// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, + /// the corresponding output will have 2x2 elements and will have a depth of + /// 1 channel (1 = `4 / (block_size * block_size)`). + /// The output element shape is `[2, 2, 1]`. + /// + /// For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. + /// + /// ``` + /// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + /// ``` + /// + /// This operation, for block size of 2, will return the following tensor of shape + /// `[1, 2, 2, 3]` + /// + /// ``` + /// [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// + /// ``` + /// + /// Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2: + /// + /// ``` + /// x = [[[[1, 2, 3, 4], + /// [5, 6, 7, 8]], + /// [[9, 10, 11, 12], + /// [13, 14, 15, 16]]]] + /// ``` + /// + /// the operator will return the following tensor of shape `[1 4 4 1]`: + /// + /// ``` + /// x = [[[ [1], [2], [5], [6]], + /// [ [3], [4], [7], [8]], + /// [ [9], [10], [13], [14]], + /// [ [11], [12], [15], [16]]]] + /// + /// ``` + /// + /// + /// + /// + /// + /// The size of the spatial block, same as in Space2Depth. + /// + /// + /// + /// + public static Tensor depth_to_space(Tensor input, int block_size = 0, string data_format = "NHWC", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthToSpace", name) { args = new object[] { input }, attrs = new Dictionary() { ["block_size"] = block_size, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return depth_to_space_eager_fallback(input, block_size: block_size, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["block_size"] = block_size; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("DepthToSpace", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "block_size", _op._get_attr_int("block_size"), "data_format", _op.get_attr("data_format") }; + _execute.record_gradient("DepthToSpace", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor depth_to_space_eager_fallback(Tensor input, int block_size, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "block_size", block_size, "data_format", data_format }; + var _result = _execute.execute("DepthToSpace", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DepthToSpace", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Dequantize the 'input' tensor into a float or bfloat16 Tensor. + /// + /// + /// + /// [min_range, max_range] are scalar floats that specify the range for + /// the output. 
The 'mode' attribute controls exactly which calculations are + /// used to convert the float values to their quantized equivalents. + /// + /// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + /// + /// ``` + /// if T == qint8: in[i] += (range(T) + 1)/ 2.0 + /// out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) + /// ``` + /// here `range(T) = numeric_limits::max() - numeric_limits::min()` + /// + /// *MIN_COMBINED Mode Example* + /// + /// If the input comes from a QuantizedRelu6, the output type is + /// quint8 (range of 0-255) but the possible range of QuantizedRelu6 is + /// 0-6. The min_range and max_range values are therefore 0.0 and 6.0. + /// Dequantize on quint8 will take each value, cast to float, and multiply + /// by 6 / 255. + /// Note that if quantizedtype is qint8, the operation will additionally add + /// each value by 128 prior to casting. + /// + /// If the mode is 'MIN_FIRST', then this approach is used: + /// + /// ```c++ + /// num_discrete_values = 1 << (# of bits in T) + /// range_adjust = num_discrete_values / (num_discrete_values - 1) + /// range = (range_max - range_min) * range_adjust + /// range_scale = range / num_discrete_values + /// const double offset_input = static_cast(input) - lowest_quantized; + /// result = range_min + ((input - numeric_limits::min()) * range_scale) + /// ``` + /// + /// If the mode is `SCALED`, dequantization is performed by multiplying each + /// input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). + /// + /// The scaling_factor is determined from `min_range`, `max_range`, and + /// `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}` + /// and `QuantizeV2`, using the following algorithm: + /// + /// ```c++ + /// + /// const int min_expected_T = std::numeric_limits::min() + + /// (narrow_range ? 1 : 0); + /// const int max_expected_T = std::numeric_limits::max(); + /// const float max_expected_T = std::numeric_limits::max(); + /// + /// const float scale_factor = + /// (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) + /// : std::max(min_range / min_expected_T, + /// max_range / max_expected_T); + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Type of the output tensor. Currently Dequantize supports float and bfloat16. + /// If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. + /// + /// + /// + public static Tensor dequantize(Tensor input, Tensor min_range, Tensor max_range, string mode = "MIN_COMBINED", bool narrow_range = false, int axis = -1, TF_DataType dtype = TF_DataType.TF_FLOAT, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dequantize", name) { args = new object[] { input, min_range, max_range }, attrs = new Dictionary() { ["mode"] = mode, ["narrow_range"] = narrow_range, ["axis"] = axis, ["dtype"] = dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return dequantize_eager_fallback(input, min_range, max_range, mode: mode, narrow_range: narrow_range, axis: axis, dtype: dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (mode is null) + { + mode = "MIN_COMBINED"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["min_range"] = min_range; + keywords["max_range"] = max_range; + keywords["mode"] = mode; + keywords["narrow_range"] = narrow_range; + keywords["axis"] = axis; + keywords["dtype"] = dtype; + var _op = tf.OpDefLib._apply_op_helper("Dequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis"), "dtype", _op._get_attr_type("dtype") }; + _execute.record_gradient("Dequantize", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor dequantize_eager_fallback(Tensor input, Tensor min_range, Tensor max_range, string mode, bool narrow_range, int axis, TF_DataType dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, min_range, max_range }; + object[] _attrs = new object[] { "T", input.dtype, "mode", mode, "narrow_range", narrow_range, "axis", axis, "dtype", dtype }; + var _result = _execute.execute("Dequantize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Dequantize", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a diagonal tensor with a given diagonal values. + /// + /// + /// + /// Given a `diagonal`, this operation returns a tensor with the `diagonal` and + /// everything else padded with zeros. The diagonal is computed as follows: + /// + /// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of + /// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: + /// + /// `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else. + /// + /// For example: + /// + /// ``` + /// # 'diagonal' is [1, 2, 3, 4] + /// tf.diag(diagonal) ==> [[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]] + /// ``` + /// + /// + /// + /// + public static Tensor diag(Tensor diagonal, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Diag", name) { args = new object[] { diagonal }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return diag_eager_fallback(diagonal, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["diagonal"] = diagonal; + var _op = tf.OpDefLib._apply_op_helper("Diag", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Diag", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor diag_eager_fallback(Tensor diagonal, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { diagonal }; + object[] _attrs = new object[] { "T", diagonal.dtype }; + var _result = _execute.execute("Diag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Diag", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the diagonal part of the tensor. + /// + /// + /// + /// This operation returns a tensor with the `diagonal` part + /// of the `input`. The `diagonal` part is computed as follows: + /// + /// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a + /// tensor of rank `k` with dimensions `[D1,..., Dk]` where: + /// + /// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. + /// + /// For example: + /// + /// ``` + /// # 'input' is [[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]] + /// + /// tf.diag_part(input) ==> [1, 2, 3, 4] + /// ``` + /// + /// + /// + /// + public static Tensor diag_part(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DiagPart", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return diag_part_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("DiagPart", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("DiagPart", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor diag_part_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("DiagPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DiagPart", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the (possibly normalized) Levenshtein Edit Distance. + /// + /// + /// + /// The inputs are variable-length sequences provided by SparseTensors + /// (hypothesis_indices, hypothesis_values, hypothesis_shape) + /// and + /// (truth_indices, truth_values, truth_shape). 
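+ ///
+ /// As an illustrative worked example (assuming string values): comparing a hypothesis
+ /// sequence ["a"] against a truth sequence ["a", "b"] requires one insertion, so the
+ /// raw edit distance is 1.0; with normalize = true the op divides by the truth length,
+ /// giving 0.5.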
+ /// + /// The inputs are: + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// boolean (if true, edit distances are normalized by length of truth). + /// + /// The output is: + /// + /// + /// + public static Tensor edit_distance(Tensor hypothesis_indices, Tensor hypothesis_values, Tensor hypothesis_shape, Tensor truth_indices, Tensor truth_values, Tensor truth_shape, bool normalize = true, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EditDistance", name) { args = new object[] { hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape }, attrs = new Dictionary() { ["normalize"] = normalize } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return edit_distance_eager_fallback(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize: normalize, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["hypothesis_indices"] = hypothesis_indices; + keywords["hypothesis_values"] = hypothesis_values; + keywords["hypothesis_shape"] = hypothesis_shape; + keywords["truth_indices"] = truth_indices; + keywords["truth_values"] = truth_values; + keywords["truth_shape"] = truth_shape; + keywords["normalize"] = normalize; + var _op = tf.OpDefLib._apply_op_helper("EditDistance", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "normalize", _op._get_attr_bool("normalize"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("EditDistance", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor edit_distance_eager_fallback(Tensor hypothesis_indices, Tensor hypothesis_values, Tensor hypothesis_shape, Tensor truth_indices, Tensor truth_values, Tensor truth_shape, bool normalize, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape }; + object[] _attrs = new object[] { "normalize", normalize, "T", hypothesis_values.dtype }; + var _result = _execute.execute("EditDistance", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("EditDistance", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor empty(Tensor shape, TF_DataType dtype, bool init = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Empty", name) { args = new object[] { shape }, attrs = new Dictionary() { ["dtype"] = dtype, ["init"] = init } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return empty_eager_fallback(shape, dtype: dtype, init: init, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["shape"] = shape; + keywords["dtype"] = dtype; + keywords["init"] = init; + var _op = tf.OpDefLib._apply_op_helper("Empty", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "init", _op._get_attr_bool("init") }; + _execute.record_gradient("Empty", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor empty_eager_fallback(Tensor shape, TF_DataType dtype, bool init, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { shape }; + object[] _attrs = new object[] { "dtype", dtype, "init", init }; + var _result = _execute.execute("Empty", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Empty", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Ensures that the tensor's shape matches the expected shape. + /// + /// + /// + /// Raises an error if the input tensor's shape does not match the specified shape. + /// Returns the input tensor otherwise. + /// + /// + /// + /// + /// + /// The expected (possibly partially specified) shape of the input tensor. + /// + /// + /// + public static Tensor ensure_shape(Tensor input, Shape shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EnsureShape", name) { args = new object[] { input }, attrs = new Dictionary() { ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return ensure_shape_eager_fallback(input, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("EnsureShape", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "shape", _op.get_attr("shape"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("EnsureShape", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor ensure_shape_eager_fallback(Tensor input, Shape shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "shape", shape, "T", input.dtype }; + var _result = _execute.execute("EnsureShape", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("EnsureShape", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Inserts a dimension of 1 into a tensor's shape. + /// + /// + /// + /// Given a tensor `input`, this operation inserts a dimension of 1 at the + /// dimension index `dim` of `input`'s shape. 
The dimension index `dim` starts at + /// zero; if you specify a negative number for `dim` it is counted backward from + /// the end. + /// + /// This operation is useful if you want to add a batch dimension to a single + /// element. For example, if you have a single image of shape `[height, width, + /// channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, + /// which will make the shape `[1, height, width, channels]`. + /// + /// Other examples: + /// + /// ``` + /// # 't' is a tensor of shape [2] + /// shape(expand_dims(t, 0)) ==> [1, 2] + /// shape(expand_dims(t, 1)) ==> [2, 1] + /// shape(expand_dims(t, -1)) ==> [2, 1] + /// + /// # 't2' is a tensor of shape [2, 3, 5] + /// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] + /// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] + /// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] + /// ``` + /// + /// This operation requires that: + /// + /// `-1-input.dims() <= dim <= input.dims()` + /// + /// This operation is related to `squeeze()`, which removes dimensions of + /// size 1. + /// + /// + /// + /// + /// + public static Tensor expand_dims(Tensor input, Tensor dim, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExpandDims", name) { args = new object[] { input, dim }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return expand_dims_eager_fallback(input, dim, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["dim"] = dim; + var _op = tf.OpDefLib._apply_op_helper("ExpandDims", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tdim", _op._get_attr_type("Tdim") }; + _execute.record_gradient("ExpandDims", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor expand_dims_eager_fallback(Tensor input, Tensor dim, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, dim }; + object[] _attrs = new object[] { "T", input.dtype, "Tdim", dim.dtype }; + var _result = _execute.execute("ExpandDims", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ExpandDims", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Extract `patches` from `images` and put them in the "depth" output dimension. + /// + /// + /// + /// + /// The size of the sliding window for each dimension of `images`. + /// + /// + /// + /// + /// How far the centers of two consecutive patches are in + /// the images. Must be: `[1, stride_rows, stride_cols, 1]`. + /// + /// + /// + /// + /// Must be: `[1, rate_rows, rate_cols, 1]`. This is the + /// input stride, specifying how far two consecutive patch samples are in the + /// input. Equivalent to extracting patches with + /// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by + /// subsampling them spatially by a factor of `rates`. This is equivalent to + /// `rate` in dilated (a.k.a. Atrous) convolutions. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + public static Tensor extract_image_patches(Tensor images, int[] ksizes, int[] strides, int[] rates, string padding, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExtractImagePatches", name) { args = new object[] { images }, attrs = new Dictionary() { ["ksizes"] = ksizes, ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return extract_image_patches_eager_fallback(images, ksizes: ksizes, strides: strides, rates: rates, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["images"] = images; + keywords["ksizes"] = ksizes; + keywords["strides"] = strides; + keywords["rates"] = rates; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("ExtractImagePatches", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksizes", _op.get_attr("ksizes"), "strides", _op.get_attr("strides"), "rates", _op.get_attr("rates"), "T", _op._get_attr_type("T"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("ExtractImagePatches", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor extract_image_patches_eager_fallback(Tensor images, int[] ksizes, int[] strides, int[] rates, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { images }; + object[] _attrs = new object[] { "ksizes", ksizes, "strides", strides, "rates", rates, "T", images.dtype, "padding", padding }; + var _result = _execute.execute("ExtractImagePatches", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ExtractImagePatches", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Extract `patches` from `input` and put them in the `"depth"` output dimension. 3D extension of `extract_image_patches`. + /// + /// + /// + /// + /// The size of the sliding window for each dimension of `input`. + /// + /// + /// + /// + /// 1-D of length 5. How far the centers of two consecutive patches are in + /// `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// The size-related attributes are specified as follows: + /// + /// ```python + /// ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] + /// strides = [1, stride_planes, strides_rows, strides_cols, 1] + /// ``` + /// + /// + /// + public static Tensor extract_volume_patches(Tensor input, int[] ksizes, int[] strides, string padding, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExtractVolumePatches", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksizes"] = ksizes, ["strides"] = strides, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return extract_volume_patches_eager_fallback(input, ksizes: ksizes, strides: strides, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["ksizes"] = ksizes; + keywords["strides"] = strides; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("ExtractVolumePatches", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksizes", _op.get_attr("ksizes"), "strides", _op.get_attr("strides"), "T", _op._get_attr_type("T"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("ExtractVolumePatches", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor extract_volume_patches_eager_fallback(Tensor input, int[] ksizes, int[] strides, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "ksizes", ksizes, "strides", strides, "T", input.dtype, "padding", padding }; + var _result = _execute.execute("ExtractVolumePatches", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ExtractVolumePatches", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. + /// + /// + /// + /// Attributes + /// + /// * `[min; max]` define the clamping range for the `inputs` data. + /// * `inputs` values are quantized into the quantization range ( + /// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` + /// when it is true) and then de-quantized and output as floats in `[min; max]` + /// interval. + /// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// Before quantization, `min` and `max` values are adjusted with the following + /// logic. + /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + /// the behavior can be unexpected: + /// + /// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. + /// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. + /// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. + /// + /// Quantization is called fake since the output is still in floating point. + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor fake_quant_with_min_max_args(Tensor inputs, float min = -6f, float max = 6f, int num_bits = 8, bool narrow_range = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxArgs", name) { args = new object[] { inputs }, attrs = new Dictionary() { ["min"] = min, ["max"] = max, ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fake_quant_with_min_max_args_eager_fallback(inputs, min: min, max: max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["inputs"] = inputs; + keywords["min"] = min; + keywords["max"] = max; + keywords["num_bits"] = num_bits; + keywords["narrow_range"] = narrow_range; + var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxArgs", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "min", _op.get_attr("min"), "max", _op.get_attr("max"), "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") }; + _execute.record_gradient("FakeQuantWithMinMaxArgs", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fake_quant_with_min_max_args_eager_fallback(Tensor inputs, float min, float max, int num_bits, bool narrow_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { inputs }; + object[] _attrs = new object[] { "min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range }; + var _result = _execute.execute("FakeQuantWithMinMaxArgs", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeQuantWithMinMaxArgs", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute gradients for a FakeQuantWithMinMaxArgs operation. + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor fake_quant_with_min_max_args_gradient(Tensor gradients, Tensor inputs, float min = -6f, float max = 6f, int num_bits = 8, bool narrow_range = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxArgsGradient", name) { args = new object[] { gradients, inputs }, attrs = new Dictionary() { ["min"] = min, ["max"] = max, ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fake_quant_with_min_max_args_gradient_eager_fallback(gradients, inputs, min: min, max: max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["inputs"] = inputs; + keywords["min"] = min; + keywords["max"] = max; + keywords["num_bits"] = num_bits; + keywords["narrow_range"] = narrow_range; + var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxArgsGradient", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "min", _op.get_attr("min"), "max", _op.get_attr("max"), "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") }; + _execute.record_gradient("FakeQuantWithMinMaxArgsGradient", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fake_quant_with_min_max_args_gradient_eager_fallback(Tensor gradients, Tensor inputs, float min, float max, int num_bits, bool narrow_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, inputs }; + object[] _attrs = new object[] { "min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range }; + var _result = _execute.execute("FakeQuantWithMinMaxArgsGradient", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeQuantWithMinMaxArgsGradient", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Fake-quantize the 'inputs' tensor of type float via global float scalars + /// + /// + /// + /// Fake-quantize the `inputs` tensor of type float via global float scalars + /// `min` and `max` to `outputs` tensor of same shape as `inputs`. + /// + /// Attributes + /// + /// * `[min; max]` define the clamping range for the `inputs` data. + /// * `inputs` values are quantized into the quantization range ( + /// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` + /// when it is true) and then de-quantized and output as floats in `[min; max]` + /// interval. + /// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// Before quantization, `min` and `max` values are adjusted with the following + /// logic. + /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + /// the behavior can be unexpected: + /// + /// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. + /// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. + /// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. + /// + /// This operation has a gradient and thus allows for training `min` and `max` + /// values. + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor fake_quant_with_min_max_vars(Tensor inputs, Tensor min, Tensor max, int num_bits = 8, bool narrow_range = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVars", name) { args = new object[] { inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fake_quant_with_min_max_vars_eager_fallback(inputs, min, max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["inputs"] = inputs; + keywords["min"] = min; + keywords["max"] = max; + keywords["num_bits"] = num_bits; + keywords["narrow_range"] = narrow_range; + var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVars", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") }; + _execute.record_gradient("FakeQuantWithMinMaxVars", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fake_quant_with_min_max_vars_eager_fallback(Tensor inputs, Tensor min, Tensor max, int num_bits, bool narrow_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { inputs, min, max }; + object[] _attrs = new object[] { "num_bits", num_bits, "narrow_range", narrow_range }; + var _result = _execute.execute("FakeQuantWithMinMaxVars", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeQuantWithMinMaxVars", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute gradients for a FakeQuantWithMinMaxVars operation. + /// + /// + /// + /// + /// + /// + /// + /// The bitwidth of the quantization; between 2 and 8, inclusive. + /// + /// + /// + /// + /// Whether to quantize into 2^num_bits - 1 distinct values. + /// + /// + /// + public static Tensor[] fake_quant_with_min_max_vars_gradient(Tensor gradients, Tensor inputs, Tensor min, Tensor max, int num_bits = 8, bool narrow_range = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsGradient", name) { args = new object[] { gradients, inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fake_quant_with_min_max_vars_gradient_eager_fallback(gradients, inputs, min, max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["inputs"] = inputs; + keywords["min"] = min; + keywords["max"] = max; + keywords["num_bits"] = num_bits; + keywords["narrow_range"] = narrow_range; + var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVarsGradient", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") }; + _execute.record_gradient("FakeQuantWithMinMaxVarsGradient", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fake_quant_with_min_max_vars_gradient_eager_fallback(Tensor gradients, Tensor inputs, Tensor min, Tensor max, int num_bits, bool narrow_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, inputs, min, max }; + object[] _attrs = new object[] { "num_bits", num_bits, "narrow_range", narrow_range }; + var _result = _execute.execute("FakeQuantWithMinMaxVarsGradient", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeQuantWithMinMaxVarsGradient", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Fake-quantize the 'inputs' tensor of type float via per-channel floats + /// + /// + /// + /// Fake-quantize the `inputs` tensor of type float per-channel and one of the + /// shapes: `[d]`, `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` + /// of shape `[d]` to `outputs` tensor of same shape as `inputs`. + /// + /// Attributes + /// + /// * `[min; max]` define the clamping range for the `inputs` data. + /// * `inputs` values are quantized into the quantization range ( + /// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` + /// when it is true) and then de-quantized and output as floats in `[min; max]` + /// interval. + /// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// Before quantization, `min` and `max` values are adjusted with the following + /// logic. + /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + /// the behavior can be unexpected: + /// + /// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. + /// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. + /// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. + /// + /// This operation has a gradient and thus allows for training `min` and `max` + /// values. + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor fake_quant_with_min_max_vars_per_channel(Tensor inputs, Tensor min, Tensor max, int num_bits = 8, bool narrow_range = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsPerChannel", name) { args = new object[] { inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fake_quant_with_min_max_vars_per_channel_eager_fallback(inputs, min, max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["inputs"] = inputs; + keywords["min"] = min; + keywords["max"] = max; + keywords["num_bits"] = num_bits; + keywords["narrow_range"] = narrow_range; + var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannel", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") }; + _execute.record_gradient("FakeQuantWithMinMaxVarsPerChannel", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fake_quant_with_min_max_vars_per_channel_eager_fallback(Tensor inputs, Tensor min, Tensor max, int num_bits, bool narrow_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { inputs, min, max }; + object[] _attrs = new object[] { "num_bits", num_bits, "narrow_range", narrow_range }; + var _result = _execute.execute("FakeQuantWithMinMaxVarsPerChannel", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeQuantWithMinMaxVarsPerChannel", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. + /// + /// + /// + /// + /// + /// + /// + /// The bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// + /// + /// + /// Whether to quantize into 2^num_bits - 1 distinct values. + /// + /// + /// + public static Tensor[] fake_quant_with_min_max_vars_per_channel_gradient(Tensor gradients, Tensor inputs, Tensor min, Tensor max, int num_bits = 8, bool narrow_range = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsPerChannelGradient", name) { args = new object[] { gradients, inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(gradients, inputs, min, max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["inputs"] = inputs; + keywords["min"] = min; + keywords["max"] = max; + keywords["num_bits"] = num_bits; + keywords["narrow_range"] = narrow_range; + var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannelGradient", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") }; + _execute.record_gradient("FakeQuantWithMinMaxVarsPerChannelGradient", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(Tensor gradients, Tensor inputs, Tensor min, Tensor max, int num_bits, bool narrow_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, inputs, min, max }; + object[] _attrs = new object[] { "num_bits", num_bits, "narrow_range", narrow_range }; + var _result = _execute.execute("FakeQuantWithMinMaxVarsPerChannelGradient", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeQuantWithMinMaxVarsPerChannelGradient", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Creates a tensor filled with a scalar value. + /// + /// + /// + /// This operation creates a tensor of shape `dims` and fills it with `value`. + /// + /// For example: + /// + /// ``` + /// # Output tensor has shape [2, 3]. + /// fill([2, 3], 9) ==> [[9, 9, 9] + /// [9, 9, 9]] + /// ``` + /// + /// `tf.fill` differs from `tf.constant` in a few ways: + /// + /// * `tf.fill` only supports scalar contents, whereas `tf.constant` supports + /// Tensor values. + /// * `tf.fill` creates an Op in the computation graph that constructs the actual + /// Tensor value at runtime. This is in contrast to `tf.constant` which embeds + /// the entire Tensor into the graph with a `Const` node. + /// * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes + /// based on other runtime Tensors, unlike `tf.constant`. + /// + /// + /// + /// + /// + public static Tensor fill(Tensor dims, Tensor value, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Fill", name) { args = new object[] { dims, value }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fill_eager_fallback(dims, value, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["dims"] = dims; + keywords["value"] = value; + var _op = tf.OpDefLib._apply_op_helper("Fill", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "index_type", _op._get_attr_type("index_type") }; + _execute.record_gradient("Fill", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fill_eager_fallback(Tensor dims, Tensor value, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { dims, value }; + object[] _attrs = new object[] { "T", value.dtype, "index_type", dims.dtype }; + var _result = _execute.execute("Fill", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Fill", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Generates fingerprint values. + /// + /// + /// + /// Generates fingerprint values of `data`. + /// + /// Fingerprint op considers the first dimension of `data` as the batch dimension, + /// and `output[i]` contains the fingerprint value generated from contents in + /// `data[i, ...]` for all `i`. + /// + /// Fingerprint op writes fingerprint values as byte arrays. For example, the + /// default method `farmhash64` generates a 64-bit fingerprint value at a time. + /// This 8-byte value is written out as an `uint8` array of size 8, in little-endian + /// order. + /// + /// For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), + /// and that the fingerprint method is `farmhash64`. In this case, the output shape + /// is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of + /// each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in + /// `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers + /// in `data[1, :, :]`. + /// + /// Note that this op fingerprints the raw underlying buffer, and it does not + /// fingerprint Tensor's metadata such as data type and/or shape. For example, the + /// fingerprint values are invariant under reshapes and bitcasts as long as the + /// batch dimension remain the same: + /// + /// ``` + /// Fingerprint(data) == Fingerprint(Reshape(data, ...)) + /// Fingerprint(data) == Fingerprint(Bitcast(data, ...)) + /// ``` + /// + /// For string data, one should expect `Fingerprint(data) != + /// Fingerprint(ReduceJoin(data))` in general. + /// + /// + /// + /// + /// + public static Tensor fingerprint(Tensor data, Tensor method, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Fingerprint", name) { args = new object[] { data, method }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fingerprint_eager_fallback(data, method, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["method"] = method; + var _op = tf.OpDefLib._apply_op_helper("Fingerprint", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Fingerprint", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fingerprint_eager_fallback(Tensor data, Tensor method, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, method }; + object[] _attrs = new object[] { "T", data.dtype }; + var _result = _execute.execute("Fingerprint", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Fingerprint", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gather slices from `params` according to `indices`. + /// + /// + /// + /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + /// Produces an output tensor with shape `indices.shape + params.shape[1:]` where: + /// + /// ```python + /// # Scalar indices + /// output[:, ..., :] = params[indices, :, ... :] + /// + /// # Vector indices + /// output[i, :, ..., :] = params[indices[i], :, ... :] + /// + /// # Higher rank indices + /// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] + /// ``` + /// + /// If `indices` is a permutation and `len(indices) == params.shape[0]` then + /// this operation will permute `params` accordingly. + /// + /// `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in + /// `indices` are always validated to be within range. If assigned to GPU, + /// out-of-bound indices result in safe but unspecified behavior, which may include + /// raising an error. + /// + ///
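+ /// A minimal C# sketch of calling this generated binding (names are illustrative:
+ /// `params_` is a tensor of shape [4, 3] and `idx` holds the indices [2, 0]):
+ ///
+ /// ```
+ /// var rows = gather(params_, idx);   // rows has shape [2, 3]
+ /// ```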
+ ///
+ ///
+ ///
+ ///
+ /// + /// + /// + /// + public static Tensor gather(Tensor params_, Tensor indices, bool validate_indices = true, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Gather", name) { args = new object[] { params_, indices }, attrs = new Dictionary() { ["validate_indices"] = validate_indices } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return gather_eager_fallback(params_, indices, validate_indices: validate_indices, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["params"] = params_; + keywords["indices"] = indices; + keywords["validate_indices"] = validate_indices; + var _op = tf.OpDefLib._apply_op_helper("Gather", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "validate_indices", _op._get_attr_bool("validate_indices"), "Tparams", _op._get_attr_type("Tparams"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("Gather", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor gather_eager_fallback(Tensor params_, Tensor indices, bool validate_indices, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { params_, indices }; + object[] _attrs = new object[] { "validate_indices", validate_indices, "Tparams", params_.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("Gather", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Gather", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gather slices from `params` into a Tensor with shape specified by `indices`. + /// + /// + /// + /// `indices` is a K-dimensional integer tensor, best thought of as a + /// (K-1)-dimensional tensor of indices into `params`, where each element defines a + /// slice of `params`: + /// + /// output[\(i_0, ..., i_{K-2}\)] = params[indices[\(i_0, ..., i_{K-2}\)]] + /// + /// Whereas in `tf.gather` `indices` defines slices into the `axis` + /// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the + /// first `N` dimensions of `params`, where `N = indices.shape[-1]`. + /// + /// The last dimension of `indices` can be at most the rank of + /// `params`: + /// + /// indices.shape[-1] <= params.rank + /// + /// The last dimension of `indices` corresponds to elements + /// (if `indices.shape[-1] == params.rank`) or slices + /// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` + /// of `params`. The output tensor has shape + /// + /// indices.shape[:-1] + params.shape[indices.shape[-1]:] + /// + /// Note that on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, a 0 is stored in the + /// corresponding output value. + /// + /// Some examples below. 
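+ ///
+ /// Before the Python examples, a minimal C# sketch added for illustration (it assumes the
+ /// surrounding generated class is `gen_array_ops`; `using static Tensorflow.Binding;` provides `tf`):
+ ///
+ /// ```csharp
+ /// var params_ = tf.constant(new[,] { { 1, 2 }, { 3, 4 } });
+ /// var indices = tf.constant(new[,] { { 0, 0 }, { 1, 1 } });
+ /// var picked = gen_array_ops.gather_nd(params_, indices);
+ /// // Each row of `indices` addresses one element of `params`, so picked == [1, 4].
+ /// ```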
+ /// + /// Simple indexing into a matrix: + /// + /// ```python + /// indices = [[0, 0], [1, 1]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = ['a', 'd'] + /// ``` + /// + /// Slice indexing into a matrix: + /// + /// ```python + /// indices = [[1], [0]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = [['c', 'd'], ['a', 'b']] + /// ``` + /// + /// Indexing into a 3-tensor: + /// + /// ```python + /// indices = [[1]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [[['a1', 'b1'], ['c1', 'd1']]] + /// + /// + /// indices = [[0, 1], [1, 0]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [['c0', 'd0'], ['a1', 'b1']] + /// + /// + /// indices = [[0, 0, 1], [1, 0, 1]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = ['b0', 'b1'] + /// ``` + /// + /// Batched indexing into a matrix: + /// + /// ```python + /// indices = [[[0, 0]], [[0, 1]]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = [['a'], ['b']] + /// ``` + /// + /// Batched slice indexing into a matrix: + /// + /// ```python + /// indices = [[[1]], [[0]]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = [[['c', 'd']], [['a', 'b']]] + /// ``` + /// + /// Batched indexing into a 3-tensor: + /// + /// ```python + /// indices = [[[1]], [[0]]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [[[['a1', 'b1'], ['c1', 'd1']]], + /// [[['a0', 'b0'], ['c0', 'd0']]]] + /// + /// indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [[['c0', 'd0'], ['a1', 'b1']], + /// [['a0', 'b0'], ['c1', 'd1']]] + /// + /// + /// indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [['b0', 'b1'], ['d0', 'c1']] + /// ``` + /// + /// See also `tf.gather` and `tf.batch_gather`. + /// + /// + /// + /// + /// + public static Tensor gather_nd(Tensor params_, Tensor indices, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GatherNd", name) { args = new object[] { params_, indices }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return gather_nd_eager_fallback(params_, indices, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["params"] = params_; + keywords["indices"] = indices; + var _op = tf.OpDefLib._apply_op_helper("GatherNd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tparams", _op._get_attr_type("Tparams"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("GatherNd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor gather_nd_eager_fallback(Tensor params_, Tensor indices, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { params_, indices }; + object[] _attrs = new object[] { "Tparams", params_.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("GatherNd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("GatherNd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gather slices from `params` axis `axis` according to `indices`. + /// + /// + /// + /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + /// Produces an output tensor with shape `params.shape[:axis] + + /// indices.shape[batch_dims:] + params.shape[axis + 1:]` where: + /// + /// ```python + /// # Scalar indices (output is rank(params) - 1). + /// output[a_0, ..., a_n, b_0, ..., b_n] = + /// params[a_0, ..., a_n, indices, b_0, ..., b_n] + /// + /// # Vector indices (output is rank(params)). + /// output[a_0, ..., a_n, i, b_0, ..., b_n] = + /// params[a_0, ..., a_n, indices[i], b_0, ..., b_n] + /// + /// # Higher rank indices (output is rank(params) + rank(indices) - 1). + /// output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = + /// params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] + /// ``` + /// + ///
+ ///
+ ///
+ /// + /// Note that on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, a 0 is stored in the + /// corresponding output value. + /// + /// See also `tf.batch_gather` and `tf.gather_nd`. + /// + ///
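+ ///
+ /// A minimal C# usage sketch added for illustration (assuming the surrounding generated class
+ /// is `gen_array_ops`; `using static Tensorflow.Binding;` provides `tf`), gathering columns by
+ /// passing `axis = 1`:
+ ///
+ /// ```csharp
+ /// var params_ = tf.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } });
+ /// var indices = tf.constant(new[] { 2, 0 });
+ /// var axis = tf.constant(1);
+ /// var cols = gen_array_ops.gather_v2(params_, indices, axis);
+ /// // cols == [[3, 1], [6, 4]]: columns 2 and 0 taken along axis 1 of `params`.
+ /// ```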
+ /// + /// + /// + /// + /// + public static Tensor gather_v2(Tensor params_, Tensor indices, Tensor axis, int batch_dims = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GatherV2", name) { args = new object[] { params_, indices, axis }, attrs = new Dictionary() { ["batch_dims"] = batch_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return gather_v2_eager_fallback(params_, indices, axis, batch_dims: batch_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["params"] = params_; + keywords["indices"] = indices; + keywords["axis"] = axis; + keywords["batch_dims"] = batch_dims; + var _op = tf.OpDefLib._apply_op_helper("GatherV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "batch_dims", _op._get_attr_int("batch_dims"), "Tparams", _op._get_attr_type("Tparams"), "Tindices", _op._get_attr_type("Tindices"), "Taxis", _op._get_attr_type("Taxis") }; + _execute.record_gradient("GatherV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor gather_v2_eager_fallback(Tensor params_, Tensor indices, Tensor axis, int batch_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { params_, indices, axis }; + object[] _attrs = new object[] { "batch_dims", batch_dims, "Tparams", params_.dtype, "Tindices", indices.dtype, "Taxis", axis.dtype }; + var _result = _execute.execute("GatherV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("GatherV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gives a guarantee to the TF runtime that the input tensor is a constant. + /// + /// + /// + /// The runtime is then free to make optimizations based on this. + /// + /// Only accepts value typed tensors as inputs and rejects resource variable handles + /// as input. + /// + /// Returns the input tensor without modification. + /// + /// + /// + /// + public static Tensor guarantee_const(Tensor input, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GuaranteeConst", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return guarantee_const_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("GuaranteeConst", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("GuaranteeConst", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor guarantee_const_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("GuaranteeConst", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("GuaranteeConst", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Return a tensor with the same shape and contents as the input tensor or value. + /// + /// + /// + public static Tensor identity(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Identity", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return identity_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("Identity", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Identity", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor identity_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("Identity", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Identity", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a list of tensors with the same shapes and contents as the input + /// + /// + /// + /// tensors. + /// + /// This op can be used to override the gradient for complicated functions. For + /// example, suppose y = f(x) and we wish to apply a custom function g for backprop + /// such that dx = g(dy). In Python, + /// + /// ```python + /// with tf.get_default_graph().gradient_override_map( + /// {'IdentityN': 'OverrideGradientWithG'}): + /// y, _ = identity_n([f(x), x]) + /// + /// @tf.RegisterGradient('OverrideGradientWithG') + /// def ApplyG(op, dy, _): + /// return [None, g(dy)] # Do not backprop to f(x). + /// ``` + /// + /// + /// + /// + /// + public static Tensor identity_n(Tensor input, TF_DataType[] T, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityN", name) { args = new object[] { input }, attrs = new Dictionary() { ["T"] = T } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return identity_n_eager_fallback(input, T: T, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["T"] = T; + var _op = tf.OpDefLib._apply_op_helper("IdentityN", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op.get_attr("T") }; + _execute.record_gradient("IdentityN", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor identity_n_eager_fallback(Tensor input, TF_DataType[] T, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", T }; + var _result = _execute.execute("IdentityN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IdentityN", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns immutable tensor from memory region. + /// + /// + /// + /// The current implementation memmaps the tensor from a file. + /// + /// + /// + /// + /// Type of the returned tensor. + /// + /// + /// + /// + /// Shape of the returned tensor. + /// + /// + /// + /// + /// Name of readonly memory region used by the tensor, see + /// NewReadOnlyMemoryRegionFromFile in tensorflow::Env. + /// + /// + /// + public static Tensor immutable_const(TF_DataType dtype, Shape shape, string memory_region_name, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ImmutableConst", name) { args = new object[] { }, attrs = new Dictionary() { ["dtype"] = dtype, ["shape"] = shape, ["memory_region_name"] = memory_region_name } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return immutable_const_eager_fallback(dtype: dtype, shape: shape, memory_region_name: memory_region_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["dtype"] = dtype; + keywords["shape"] = shape; + keywords["memory_region_name"] = memory_region_name; + var _op = tf.OpDefLib._apply_op_helper("ImmutableConst", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape"), "memory_region_name", _op.get_attr("memory_region_name") }; + _execute.record_gradient("ImmutableConst", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor immutable_const_eager_fallback(TF_DataType dtype, Shape shape, string memory_region_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "dtype", dtype, "shape", shape, "memory_region_name", memory_region_name }; + var _result = _execute.execute("ImmutableConst", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ImmutableConst", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor inplace_add(Tensor x, Tensor i, Tensor v, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceAdd", name) { args = new object[] { x, i, v }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return inplace_add_eager_fallback(x, i, v, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["i"] = i; + keywords["v"] = v; + var _op = tf.OpDefLib._apply_op_helper("InplaceAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("InplaceAdd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor inplace_add_eager_fallback(Tensor x, Tensor i, Tensor v, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, i, v }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("InplaceAdd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InplaceAdd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor inplace_sub(Tensor x, Tensor i, Tensor v, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceSub", name) { args = new object[] { x, i, v }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return inplace_sub_eager_fallback(x, i, v, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["i"] = i; + keywords["v"] = v; + var _op = tf.OpDefLib._apply_op_helper("InplaceSub", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("InplaceSub", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor inplace_sub_eager_fallback(Tensor x, Tensor i, Tensor v, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, i, v }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("InplaceSub", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InplaceSub", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor inplace_update(Tensor x, Tensor i, Tensor v, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceUpdate", name) { args = new object[] { x, i, v }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return inplace_update_eager_fallback(x, i, v, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["i"] = i; + keywords["v"] = v; + var _op = tf.OpDefLib._apply_op_helper("InplaceUpdate", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("InplaceUpdate", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor inplace_update_eager_fallback(Tensor x, Tensor i, Tensor v, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, i, v }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("InplaceUpdate", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InplaceUpdate", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the inverse permutation of a tensor. + /// + /// + /// + /// This operation computes the inverse of an index permutation. It takes a 1-D + /// integer tensor `x`, which represents the indices of a zero-based array, and + /// swaps each value with its index position. In other words, for an output tensor + /// `y` and an input tensor `x`, this operation computes the following: + /// + /// `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` + /// + /// The values must include 0. There can be no duplicate values or negative values. + /// + /// For example: + /// + /// ``` + /// # tensor `x` is [3, 4, 0, 2, 1] + /// invert_permutation(x) ==> [2, 4, 3, 0, 1] + /// ``` + /// + /// + /// + /// + public static Tensor invert_permutation(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InvertPermutation", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return invert_permutation_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("InvertPermutation", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("InvertPermutation", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor invert_permutation_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("InvertPermutation", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InvertPermutation", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the difference between two lists of numbers or strings. + /// + /// + /// + /// Given a list `x` and a list `y`, this operation returns a list `out` that + /// represents all values that are in `x` but not in `y`. The returned list `out` + /// is sorted in the same order that the numbers appear in `x` (duplicates are + /// preserved). This operation also returns a list `idx` that represents the + /// position of each `out` element in `x`. In other words: + /// + /// `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` + /// + /// For example, given this input: + /// + /// ``` + /// x = [1, 2, 3, 4, 5, 6] + /// y = [1, 3, 5] + /// ``` + /// + /// This operation would return: + /// + /// ``` + /// out ==> [2, 4, 6] + /// idx ==> [1, 3, 5] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor[] list_diff(Tensor x, Tensor y, TF_DataType out_idx = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ListDiff", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return list_diff_eager_fallback(x, y, out_idx: out_idx, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["out_idx"] = out_idx; + var _op = tf.OpDefLib._apply_op_helper("ListDiff", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_idx", _op._get_attr_type("out_idx") }; + _execute.record_gradient("ListDiff", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] list_diff_eager_fallback(Tensor x, Tensor y, TF_DataType out_idx, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype, "out_idx", out_idx }; + var _result = _execute.execute("ListDiff", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ListDiff", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Applies lower_bound(sorted_search_values, values) along each row. + /// + /// + /// + /// Each set of rows with the same index in (sorted_inputs, values) is treated + /// independently. The resulting row is the equivalent of calling + /// `np.searchsorted(sorted_inputs, values, side='left')`. + /// + /// The result is not a global index to the entire + /// `Tensor`, but rather just the index in the last dimension. + /// + /// A 2-D example: + /// sorted_sequence = [[0, 3, 9, 9, 10], + /// [1, 2, 3, 4, 5]] + /// values = [[2, 4, 9], + /// [0, 2, 6]] + /// + /// result = LowerBound(sorted_sequence, values) + /// + /// result == [[1, 2, 2], + /// [0, 1, 5]] + /// + /// + /// + /// + /// + /// + public static Tensor lower_bound(Tensor sorted_inputs, Tensor values, TF_DataType out_type = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LowerBound", name) { args = new object[] { sorted_inputs, values }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return lower_bound_eager_fallback(sorted_inputs, values, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["sorted_inputs"] = sorted_inputs; + keywords["values"] = values; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("LowerBound", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("LowerBound", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor lower_bound_eager_fallback(Tensor sorted_inputs, Tensor values, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { sorted_inputs, values }; + object[] _attrs = new object[] { "T", sorted_inputs.dtype, "out_type", out_type }; + var _result = _execute.execute("LowerBound", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LowerBound", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Copy a tensor setting everything outside a central band in each innermost matrix to zero. + /// + /// + /// + /// The `band` part is computed as follows: + /// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a + /// tensor with the same shape where + /// + /// `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. + /// + /// The indicator function + /// + /// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && + /// (num_upper < 0 || (n-m) <= num_upper)`. + /// + /// For example: + /// + /// ``` + /// # if 'input' is [[ 0, 1, 2, 3] + /// # [-1, 0, 1, 2] + /// # [-2, -1, 0, 1] + /// # [-3, -2, -1, 0]], + /// + /// tf.linalg.band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] + /// [-1, 0, 1, 2] + /// [ 0, -1, 0, 1] + /// [ 0, 0, -1, 0]], + /// + /// tf.linalg.band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] + /// [-1, 0, 1, 0] + /// [-2, -1, 0, 1] + /// [ 0, -2, -1, 0]] + /// ``` + /// + /// Useful special cases: + /// + /// ``` + /// tf.linalg.band_part(input, 0, -1) ==> Upper triangular part. + /// tf.linalg.band_part(input, -1, 0) ==> Lower triangular part. + /// tf.linalg.band_part(input, 0, 0) ==> Diagonal. + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor matrix_band_part(Tensor input, Tensor num_lower, Tensor num_upper, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixBandPart", name) { args = new object[] { input, num_lower, num_upper }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_band_part_eager_fallback(input, num_lower, num_upper, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["num_lower"] = num_lower; + keywords["num_upper"] = num_upper; + var _op = tf.OpDefLib._apply_op_helper("MatrixBandPart", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindex", _op._get_attr_type("Tindex") }; + _execute.record_gradient("MatrixBandPart", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_band_part_eager_fallback(Tensor input, Tensor num_lower, Tensor num_upper, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, num_lower, num_upper }; + object[] _attrs = new object[] { "T", input.dtype, "Tindex", num_lower.dtype }; + var _result = _execute.execute("MatrixBandPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixBandPart", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a batched diagonal tensor with a given batched diagonal values. + /// + /// + /// + /// Given a `diagonal`, this operation returns a tensor with the `diagonal` and + /// everything else padded with zeros. The diagonal is computed as follows: + /// + /// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a + /// tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where: + /// + /// `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`. + /// + /// For example: + /// + /// ``` + /// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]] + /// + /// and diagonal.shape = (2, 4) + /// + /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0] + /// [0, 6, 0, 0] + /// [0, 0, 7, 0] + /// [0, 0, 0, 8]]] + /// + /// which has shape (2, 4, 4) + /// ``` + /// + /// + /// + /// + public static Tensor matrix_diag(Tensor diagonal, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiag", name) { args = new object[] { diagonal }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_diag_eager_fallback(diagonal, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["diagonal"] = diagonal; + var _op = tf.OpDefLib._apply_op_helper("MatrixDiag", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatrixDiag", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_diag_eager_fallback(Tensor diagonal, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { diagonal }; + object[] _attrs = new object[] { "T", diagonal.dtype }; + var _result = _execute.execute("MatrixDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixDiag", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the batched diagonal part of a batched tensor. + /// + /// + /// + /// This operation returns a tensor with the `diagonal` part + /// of the batched `input`. The `diagonal` part is computed as follows: + /// + /// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a + /// tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where: + /// + /// `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`. + /// + /// The input must be at least a matrix. + /// + /// For example: + /// + /// ``` + /// # 'input' is [[[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0] + /// [0, 6, 0, 0] + /// [0, 0, 7, 0] + /// [0, 0, 0, 8]]] + /// + /// and input.shape = (2, 4, 4) + /// + /// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] + /// + /// which has shape (2, 4) + /// ``` + /// + /// + /// + /// + public static Tensor matrix_diag_part(Tensor input, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPart", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_diag_part_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("MatrixDiagPart", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatrixDiagPart", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_diag_part_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("MatrixDiagPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixDiagPart", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the batched diagonal part of a batched tensor. + /// + /// + /// + /// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched + /// `input`. + /// + /// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. + /// Let `max_diag_len` be the maximum length among all diagonals to be extracted, + /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + /// Let `num_diags` be the number of diagonals to extract, + /// `num_diags = k[1] - k[0] + 1`. + /// + /// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape + /// `[I, J, ..., L, max_diag_len]` and values: + /// + /// ``` + /// diagonal[i, j, ..., l, n] + /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + /// padding_value ; otherwise. + /// ``` + /// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. + /// + /// Otherwise, the output tensor has rank `r` with dimensions + /// `[I, J, ..., L, num_diags, max_diag_len]` with values: + /// + /// ``` + /// diagonal[i, j, ..., l, m, n] + /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + /// padding_value ; otherwise. + /// ``` + /// where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. + /// + /// The input must be at least a matrix. + /// + /// For example: + /// + /// ``` + /// input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + /// [5, 6, 7, 8], + /// [9, 8, 7, 6]], + /// [[5, 4, 3, 2], + /// [1, 2, 3, 4], + /// [5, 6, 7, 8]]]) + /// + /// # A main diagonal from each batch. + /// tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + /// [5, 2, 7]] + /// + /// # A superdiagonal from each batch. + /// tf.matrix_diag_part(input, k = 1) + /// ==> [[2, 7, 6], # Output shape: (2, 3) + /// [4, 3, 8]] + /// + /// # A tridiagonal band from each batch. 
+ /// tf.matrix_diag_part(input, k = (-1, 1)) + /// ==> [[[2, 7, 6], # Output shape: (2, 3, 3) + /// [1, 6, 7], + /// [5, 8, 0]], + /// [[4, 3, 8], + /// [5, 2, 7], + /// [1, 6, 0]]] + /// + /// # Padding value = 9 + /// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) + /// ==> [[[4, 9, 9], # Output shape: (2, 3, 3) + /// [3, 8, 9], + /// [2, 7, 6]], + /// [[2, 9, 9], + /// [3, 4, 9], + /// [4, 3, 8]]] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor matrix_diag_part_v2(Tensor input, Tensor k, Tensor padding_value, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPartV2", name) { args = new object[] { input, k, padding_value }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_diag_part_v2_eager_fallback(input, k, padding_value, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["k"] = k; + keywords["padding_value"] = padding_value; + var _op = tf.OpDefLib._apply_op_helper("MatrixDiagPartV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatrixDiagPartV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_diag_part_v2_eager_fallback(Tensor input, Tensor k, Tensor padding_value, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, k, padding_value }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("MatrixDiagPartV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixDiagPartV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the batched diagonal part of a batched tensor. + /// + /// + /// + /// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched + /// `input`. + /// + /// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. + /// Let `max_diag_len` be the maximum length among all diagonals to be extracted, + /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + /// Let `num_diags` be the number of diagonals to extract, + /// `num_diags = k[1] - k[0] + 1`. + /// + /// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape + /// `[I, J, ..., L, max_diag_len]` and values: + /// + /// ``` + /// diagonal[i, j, ..., l, n] + /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + /// padding_value ; otherwise. + /// ``` + /// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. + /// + /// Otherwise, the output tensor has rank `r` with dimensions + /// `[I, J, ..., L, num_diags, max_diag_len]` with values: + /// + /// ``` + /// diagonal[i, j, ..., l, m, n] + /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + /// padding_value ; otherwise. + /// ``` + /// where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`. + /// + /// `offset` is zero except when the alignment of the diagonal is to the right. 
+ /// ``` + /// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} + /// and `d >= 0`) or + /// (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + /// and `d <= 0`) + /// 0 ; otherwise + /// ``` + /// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + /// + /// The input must be at least a matrix. + /// + /// For example: + /// + /// ``` + /// input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + /// [5, 6, 7, 8], + /// [9, 8, 7, 6]], + /// [[5, 4, 3, 2], + /// [1, 2, 3, 4], + /// [5, 6, 7, 8]]]) + /// + /// # A main diagonal from each batch. + /// tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + /// [5, 2, 7]] + /// + /// # A superdiagonal from each batch. + /// tf.matrix_diag_part(input, k = 1) + /// ==> [[2, 7, 6], # Output shape: (2, 3) + /// [4, 3, 8]] + /// + /// # A band from each batch. + /// tf.matrix_diag_part(input, k = (-1, 2)) + /// ==> [[[0, 3, 8], # Output shape: (2, 4, 3) + /// [2, 7, 6], + /// [1, 6, 7], + /// [5, 8, 0]], + /// [[0, 3, 4], + /// [4, 3, 8], + /// [5, 2, 7], + /// [1, 6, 0]]] + /// + /// # LEFT_RIGHT alignment. + /// tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT") + /// ==> [[[3, 8, 0], # Output shape: (2, 4, 3) + /// [2, 7, 6], + /// [1, 6, 7], + /// [0, 5, 8]], + /// [[3, 4, 0], + /// [4, 3, 8], + /// [5, 2, 7], + /// [0, 1, 6]]] + /// + /// # max_diag_len can be shorter than the main diagonal. + /// tf.matrix_diag_part(input, k = (-2, -1)) + /// ==> [[[5, 8], + /// [9, 0]], + /// [[1, 6], + /// [5, 0]]] + /// + /// # padding_value = 9 + /// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) + /// ==> [[[9, 9, 4], # Output shape: (2, 3, 3) + /// [9, 3, 8], + /// [2, 7, 6]], + /// [[9, 9, 2], + /// [9, 3, 4], + /// [4, 3, 8]]] + /// + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + /// a string specifying how superdiagonals and subdiagonals should be aligned, + /// respectively. There are four possible alignments: "RIGHT_LEFT" (default), + /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + /// to the right (left-pads the row) and subdiagonals to the left (right-pads the + /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + /// the opposite alignment. + /// + /// + /// + public static Tensor matrix_diag_part_v3(Tensor input, Tensor k, Tensor padding_value, string align = "RIGHT_LEFT", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPartV3", name) { args = new object[] { input, k, padding_value }, attrs = new Dictionary() { ["align"] = align } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_diag_part_v3_eager_fallback(input, k, padding_value, align: align, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (align is null) + { + align = "RIGHT_LEFT"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["k"] = k; + keywords["padding_value"] = padding_value; + keywords["align"] = align; + var _op = tf.OpDefLib._apply_op_helper("MatrixDiagPartV3", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "align", _op.get_attr("align") }; + _execute.record_gradient("MatrixDiagPartV3", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_diag_part_v3_eager_fallback(Tensor input, Tensor k, Tensor padding_value, string align, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, k, padding_value }; + object[] _attrs = new object[] { "T", input.dtype, "align", align }; + var _result = _execute.execute("MatrixDiagPartV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixDiagPartV3", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a batched diagonal tensor with given batched diagonal values. + /// + /// + /// + /// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th + /// diagonals of a matrix, with everything else padded with `padding`. `num_rows` + /// and `num_cols` specify the dimension of the innermost matrix of the output. If + /// both are not specified, the op assumes the innermost matrix is square and infers + /// its size from `k` and the innermost dimension of `diagonal`. If only one of them + /// is specified, the op assumes the unspecified value is the smallest possible + /// based on other criteria. + /// + /// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has + /// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one + /// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank + /// `r` with shape `[I, J, ..., L, num_rows, num_cols]`. + /// + /// The second innermost dimension of `diagonal` has double meaning. + /// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + /// [I, J, ..., M], and the output tensor is: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + /// padding_value ; otherwise + /// ``` + /// + /// Otherwise, `M` is treated as the number of diagonals for the matrix in the + /// same batch (`M = k[1]-k[0]+1`), and the output tensor is: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + /// padding_value ; otherwise + /// ``` + /// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. + /// + /// For example: + /// + /// ``` + /// # The main diagonal. 
+ /// diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + /// [5, 6, 7, 8]]) + /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + /// [0, 2, 0, 0], + /// [0, 0, 3, 0], + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0], + /// [0, 6, 0, 0], + /// [0, 0, 7, 0], + /// [0, 0, 0, 8]]] + /// + /// # A superdiagonal (per batch). + /// diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + /// [4, 5, 6]]) + /// tf.matrix_diag(diagonal, k = 1) + /// ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + /// [0, 0, 2, 0], + /// [0, 0, 0, 3], + /// [0, 0, 0, 0]], + /// [[0, 4, 0, 0], + /// [0, 0, 5, 0], + /// [0, 0, 0, 6], + /// [0, 0, 0, 0]]] + /// + /// # A band of diagonals. + /// diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) + /// [4, 5, 0]], + /// [[6, 7, 9], + /// [9, 1, 0]]]) + /// tf.matrix_diag(diagonals, k = (-1, 0)) + /// ==> [[[1, 0, 0], # Output shape: (2, 3, 3) + /// [4, 2, 0], + /// [0, 5, 3]], + /// [[6, 0, 0], + /// [9, 7, 0], + /// [0, 1, 9]]] + /// + /// # Rectangular matrix. + /// diagonal = np.array([1, 2]) # Input shape: (2) + /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) + /// ==> [[0, 0, 0, 0], # Output shape: (3, 4) + /// [1, 0, 0, 0], + /// [0, 2, 0, 0]] + /// + /// # Rectangular matrix with inferred num_cols and padding_value = 9. + /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) + /// ==> [[9, 9], # Output shape: (3, 2) + /// [1, 9], + /// [9, 2]] + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor matrix_diag_v2(Tensor diagonal, Tensor k, Tensor num_rows, Tensor num_cols, Tensor padding_value, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagV2", name) { args = new object[] { diagonal, k, num_rows, num_cols, padding_value }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_diag_v2_eager_fallback(diagonal, k, num_rows, num_cols, padding_value, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["diagonal"] = diagonal; + keywords["k"] = k; + keywords["num_rows"] = num_rows; + keywords["num_cols"] = num_cols; + keywords["padding_value"] = padding_value; + var _op = tf.OpDefLib._apply_op_helper("MatrixDiagV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatrixDiagV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_diag_v2_eager_fallback(Tensor diagonal, Tensor k, Tensor num_rows, Tensor num_cols, Tensor padding_value, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { diagonal, k, num_rows, num_cols, padding_value }; + object[] _attrs = new object[] { "T", diagonal.dtype }; + var _result = _execute.execute("MatrixDiagV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixDiagV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a batched diagonal tensor with given batched diagonal values. + /// + /// + /// + /// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th + /// diagonals of a matrix, with everything else padded with `padding`. 
`num_rows` + /// and `num_cols` specify the dimension of the innermost matrix of the output. If + /// both are not specified, the op assumes the innermost matrix is square and infers + /// its size from `k` and the innermost dimension of `diagonal`. If only one of them + /// is specified, the op assumes the unspecified value is the smallest possible + /// based on other criteria. + /// + /// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has + /// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one + /// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank + /// `r` with shape `[I, J, ..., L, num_rows, num_cols]`. + /// + /// The second innermost dimension of `diagonal` has double meaning. + /// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + /// [I, J, ..., M], and the output tensor is: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + /// padding_value ; otherwise + /// ``` + /// + /// Otherwise, `M` is treated as the number of diagonals for the matrix in the + /// same batch (`M = k[1]-k[0]+1`), and the output tensor is: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + /// padding_value ; otherwise + /// ``` + /// where `d = n - m`, `diag_index = [k] - d`, and + /// `index_in_diag = n - max(d, 0) + offset`. + /// + /// `offset` is zero except when the alignment of the diagonal is to the right. + /// ``` + /// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} + /// and `d >= 0`) or + /// (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + /// and `d <= 0`) + /// 0 ; otherwise + /// ``` + /// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + /// + /// For example: + /// + /// ``` + /// # The main diagonal. + /// diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + /// [5, 6, 7, 8]]) + /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + /// [0, 2, 0, 0], + /// [0, 0, 3, 0], + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0], + /// [0, 6, 0, 0], + /// [0, 0, 7, 0], + /// [0, 0, 0, 8]]] + /// + /// # A superdiagonal (per batch). + /// diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + /// [4, 5, 6]]) + /// tf.matrix_diag(diagonal, k = 1) + /// ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + /// [0, 0, 2, 0], + /// [0, 0, 0, 3], + /// [0, 0, 0, 0]], + /// [[0, 4, 0, 0], + /// [0, 0, 5, 0], + /// [0, 0, 0, 6], + /// [0, 0, 0, 0]]] + /// + /// # A tridiagonal band (per batch). + /// diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) + /// [1, 2, 3], + /// [4, 5, 0]], + /// [[0, 2, 3], + /// [6, 7, 9], + /// [9, 1, 0]]]) + /// tf.matrix_diag(diagonals, k = (-1, 1)) + /// ==> [[[1, 8, 0], # Output shape: (2, 3, 3) + /// [4, 2, 9], + /// [0, 5, 3]], + /// [[6, 2, 0], + /// [9, 7, 3], + /// [0, 1, 9]]] + /// + /// # LEFT_RIGHT alignment. + /// diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) + /// [1, 2, 3], + /// [0, 4, 5]], + /// [[2, 3, 0], + /// [6, 7, 9], + /// [0, 9, 1]]]) + /// tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT") + /// ==> [[[1, 8, 0], # Output shape: (2, 3, 3) + /// [4, 2, 9], + /// [0, 5, 3]], + /// [[6, 2, 0], + /// [9, 7, 3], + /// [0, 1, 9]]] + /// + /// # Rectangular matrix. 
+ /// diagonal = np.array([1, 2]) # Input shape: (2) + /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) + /// ==> [[0, 0, 0, 0], # Output shape: (3, 4) + /// [1, 0, 0, 0], + /// [0, 2, 0, 0]] + /// + /// # Rectangular matrix with inferred num_cols and padding_value = 9. + /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) + /// ==> [[9, 9], # Output shape: (3, 2) + /// [1, 9], + /// [9, 2]] + /// + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + /// a string specifying how superdiagonals and subdiagonals should be aligned, + /// respectively. There are four possible alignments: "RIGHT_LEFT" (default), + /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + /// to the right (left-pads the row) and subdiagonals to the left (right-pads the + /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + /// the opposite alignment. + /// + /// + /// + public static Tensor matrix_diag_v3(Tensor diagonal, Tensor k, Tensor num_rows, Tensor num_cols, Tensor padding_value, string align = "RIGHT_LEFT", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagV3", name) { args = new object[] { diagonal, k, num_rows, num_cols, padding_value }, attrs = new Dictionary() { ["align"] = align } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_diag_v3_eager_fallback(diagonal, k, num_rows, num_cols, padding_value, align: align, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (align is null) + { + align = "RIGHT_LEFT"; + } + Dictionary keywords = new(); + keywords["diagonal"] = diagonal; + keywords["k"] = k; + keywords["num_rows"] = num_rows; + keywords["num_cols"] = num_cols; + keywords["padding_value"] = padding_value; + keywords["align"] = align; + var _op = tf.OpDefLib._apply_op_helper("MatrixDiagV3", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "align", _op.get_attr("align") }; + _execute.record_gradient("MatrixDiagV3", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_diag_v3_eager_fallback(Tensor diagonal, Tensor k, Tensor num_rows, Tensor num_cols, Tensor padding_value, string align, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { diagonal, k, num_rows, num_cols, padding_value }; + object[] _attrs = new object[] { "T", diagonal.dtype, "align", align }; + var _result = _execute.execute("MatrixDiagV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixDiagV3", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a batched matrix tensor with new batched diagonal values. + /// + /// + /// + /// Given `input` and `diagonal`, this operation returns a tensor with the + /// same shape and values as `input`, except for the main diagonal of the + /// innermost matrices. These will be overwritten by the values in `diagonal`. + /// + /// The output is computed as follows: + /// + /// Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has + /// `k` dimensions `[I, J, K, ..., min(M, N)]`. 
Then the output is a + /// tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where: + /// + /// * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`. + /// * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`. + /// + /// + /// + /// + /// + public static Tensor matrix_set_diag(Tensor input, Tensor diagonal, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiag", name) { args = new object[] { input, diagonal }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_set_diag_eager_fallback(input, diagonal, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["diagonal"] = diagonal; + var _op = tf.OpDefLib._apply_op_helper("MatrixSetDiag", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatrixSetDiag", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_set_diag_eager_fallback(Tensor input, Tensor diagonal, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, diagonal }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("MatrixSetDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixSetDiag", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a batched matrix tensor with new batched diagonal values. + /// + /// + /// + /// Given `input` and `diagonal`, this operation returns a tensor with the + /// same shape and values as `input`, except for the specified diagonals of the + /// innermost matrices. These will be overwritten by the values in `diagonal`. + /// + /// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or + /// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. + /// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. + /// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. + /// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, + /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + /// + /// The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. + /// If `k` is scalar or `k[0] == k[1]`: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] + /// input[i, j, ..., l, m, n] ; otherwise + /// ``` + /// + /// Otherwise, + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + /// input[i, j, ..., l, m, n] ; otherwise + /// ``` + /// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. + /// + /// For example: + /// + /// ``` + /// # The main diagonal. 
+ /// input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) + /// [7, 7, 7, 7], + /// [7, 7, 7, 7]], + /// [[7, 7, 7, 7], + /// [7, 7, 7, 7], + /// [7, 7, 7, 7]]]) + /// diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) + /// [4, 5, 6]]) + /// tf.matrix_set_diag(diagonal) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) + /// [7, 2, 7, 7], + /// [7, 7, 3, 7]], + /// [[4, 7, 7, 7], + /// [7, 5, 7, 7], + /// [7, 7, 6, 7]]] + /// + /// # A superdiagonal (per batch). + /// tf.matrix_set_diag(diagonal, k = 1) + /// ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) + /// [7, 7, 2, 7], + /// [7, 7, 7, 3]], + /// [[7, 4, 7, 7], + /// [7, 7, 5, 7], + /// [7, 7, 7, 6]]] + /// + /// # A band of diagonals. + /// diagonals = np.array([[[1, 2, 3], # Diagonal shape: (2, 2, 3) + /// [4, 5, 0]], + /// [[6, 1, 2], + /// [3, 4, 0]]]) + /// tf.matrix_set_diag(diagonals, k = (-1, 0)) + /// ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) + /// [4, 2, 7, 7], + /// [0, 5, 3, 7]], + /// [[6, 7, 7, 7], + /// [3, 1, 7, 7], + /// [7, 4, 2, 7]]] + /// + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor matrix_set_diag_v2(Tensor input, Tensor diagonal, Tensor k, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiagV2", name) { args = new object[] { input, diagonal, k }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_set_diag_v2_eager_fallback(input, diagonal, k, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["diagonal"] = diagonal; + keywords["k"] = k; + var _op = tf.OpDefLib._apply_op_helper("MatrixSetDiagV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatrixSetDiagV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_set_diag_v2_eager_fallback(Tensor input, Tensor diagonal, Tensor k, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, diagonal, k }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("MatrixSetDiagV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixSetDiagV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a batched matrix tensor with new batched diagonal values. + /// + /// + /// + /// Given `input` and `diagonal`, this operation returns a tensor with the + /// same shape and values as `input`, except for the specified diagonals of the + /// innermost matrices. These will be overwritten by the values in `diagonal`. + /// + /// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or + /// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. + /// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. + /// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. + /// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, + /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + /// + /// The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. 
+ /// If `k` is scalar or `k[0] == k[1]`: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] + /// input[i, j, ..., l, m, n] ; otherwise + /// ``` + /// + /// Otherwise, + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + /// input[i, j, ..., l, m, n] ; otherwise + /// ``` + /// where `d = n - m`, `diag_index = k[1] - d`, and + /// `index_in_diag = n - max(d, 0) + offset`. + /// + /// `offset` is zero except when the alignment of the diagonal is to the right. + /// ``` + /// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} + /// and `d >= 0`) or + /// (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + /// and `d <= 0`) + /// 0 ; otherwise + /// ``` + /// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + /// + /// For example: + /// + /// ``` + /// # The main diagonal. + /// input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) + /// [7, 7, 7, 7], + /// [7, 7, 7, 7]], + /// [[7, 7, 7, 7], + /// [7, 7, 7, 7], + /// [7, 7, 7, 7]]]) + /// diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) + /// [4, 5, 6]]) + /// tf.matrix_set_diag(input, diagonal) + /// ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) + /// [7, 2, 7, 7], + /// [7, 7, 3, 7]], + /// [[4, 7, 7, 7], + /// [7, 5, 7, 7], + /// [7, 7, 6, 7]]] + /// + /// # A superdiagonal (per batch). + /// tf.matrix_set_diag(input, diagonal, k = 1) + /// ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) + /// [7, 7, 2, 7], + /// [7, 7, 7, 3]], + /// [[7, 4, 7, 7], + /// [7, 7, 5, 7], + /// [7, 7, 7, 6]]] + /// + /// # A band of diagonals. + /// diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) + /// [6, 5, 8], + /// [1, 2, 3], + /// [4, 5, 0]], + /// [[0, 1, 2], + /// [5, 6, 4], + /// [6, 1, 2], + /// [3, 4, 0]]]) + /// tf.matrix_set_diag(input, diagonals, k = (-1, 2)) + /// ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) + /// [4, 2, 5, 1], + /// [7, 5, 3, 8]], + /// [[6, 5, 1, 7], + /// [3, 1, 6, 2], + /// [7, 4, 2, 4]]] + /// + /// # LEFT_RIGHT alignment. + /// diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) + /// [6, 5, 8], + /// [1, 2, 3], + /// [0, 4, 5]], + /// [[1, 2, 0], + /// [5, 6, 4], + /// [6, 1, 2], + /// [0, 3, 4]]]) + /// tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT") + /// ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) + /// [4, 2, 5, 1], + /// [7, 5, 3, 8]], + /// [[6, 5, 1, 7], + /// [3, 1, 6, 2], + /// [7, 4, 2, 4]]] + /// + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + /// a string specifying how superdiagonals and subdiagonals should be aligned, + /// respectively. There are four possible alignments: "RIGHT_LEFT" (default), + /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + /// to the right (left-pads the row) and subdiagonals to the left (right-pads the + /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + /// the opposite alignment. + /// + /// + /// + public static Tensor matrix_set_diag_v3(Tensor input, Tensor diagonal, Tensor k, string align = "RIGHT_LEFT", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiagV3", name) { args = new object[] { input, diagonal, k }, attrs = new Dictionary() { ["align"] = align } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_set_diag_v3_eager_fallback(input, diagonal, k, align: align, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (align is null) + { + align = "RIGHT_LEFT"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["diagonal"] = diagonal; + keywords["k"] = k; + keywords["align"] = align; + var _op = tf.OpDefLib._apply_op_helper("MatrixSetDiagV3", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "align", _op.get_attr("align") }; + _execute.record_gradient("MatrixSetDiagV3", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_set_diag_v3_eager_fallback(Tensor input, Tensor diagonal, Tensor k, string align, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, diagonal, k }; + object[] _attrs = new object[] { "T", input.dtype, "align", align }; + var _result = _execute.execute("MatrixSetDiagV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixSetDiagV3", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Pads a tensor with mirrored values. + /// + /// + /// + /// This operation pads a `input` with mirrored values according to the `paddings` + /// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is + /// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + /// how many values to add before the contents of `input` in that dimension, and + /// `paddings[D, 1]` indicates how many values to add after the contents of `input` + /// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater + /// than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true + /// (if false, respectively). + /// + /// The padded size of each dimension D of the output is: + /// + /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + /// + /// For example: + /// + /// ``` + /// # 't' is [[1, 2, 3], [4, 5, 6]]. + /// # 'paddings' is [[1, 1]], [2, 2]]. + /// # 'mode' is SYMMETRIC. + /// # rank of 't' is 2. + /// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2] + /// [2, 1, 1, 2, 3, 3, 2] + /// [5, 4, 4, 5, 6, 6, 5] + /// [5, 4, 4, 5, 6, 6, 5]] + /// ``` + /// + /// + /// + /// + /// + /// + /// Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions + /// do not include the borders, while in symmetric mode the padded regions + /// do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` + /// is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and + /// it is `[1, 2, 3, 3, 2]` in symmetric mode. + /// + /// + /// + public static Tensor mirror_pad(Tensor input, Tensor paddings, string mode, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MirrorPad", name) { args = new object[] { input, paddings }, attrs = new Dictionary() { ["mode"] = mode } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mirror_pad_eager_fallback(input, paddings, mode: mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["paddings"] = paddings; + keywords["mode"] = mode; + var _op = tf.OpDefLib._apply_op_helper("MirrorPad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings"), "mode", _op.get_attr("mode") }; + _execute.record_gradient("MirrorPad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mirror_pad_eager_fallback(Tensor input, Tensor paddings, string mode, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, paddings }; + object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype, "mode", mode }; + var _result = _execute.execute("MirrorPad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MirrorPad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor. + /// + /// + /// + /// This operation folds the padded areas of `input` by `MirrorPad` according to the + /// `paddings` you specify. `paddings` must be the same as `paddings` argument + /// given to the corresponding `MirrorPad` op. + /// + /// The folded size of each dimension D of the output is: + /// + /// `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)` + /// + /// For example: + /// + /// ``` + /// # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]]. + /// # 'paddings' is [[0, 1]], [0, 1]]. + /// # 'mode' is SYMMETRIC. + /// # rank of 't' is 2. + /// pad(t, paddings) ==> [[ 1, 5] + /// [11, 28]] + /// ``` + /// + /// + /// + /// + /// + /// + /// The mode used in the `MirrorPad` op. + /// + /// + /// + public static Tensor mirror_pad_grad(Tensor input, Tensor paddings, string mode, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MirrorPadGrad", name) { args = new object[] { input, paddings }, attrs = new Dictionary() { ["mode"] = mode } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mirror_pad_grad_eager_fallback(input, paddings, mode: mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["paddings"] = paddings; + keywords["mode"] = mode; + var _op = tf.OpDefLib._apply_op_helper("MirrorPadGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings"), "mode", _op.get_attr("mode") }; + _execute.record_gradient("MirrorPadGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mirror_pad_grad_eager_fallback(Tensor input, Tensor paddings, string mode, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, paddings }; + object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype, "mode", mode }; + var _result = _execute.execute("MirrorPadGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MirrorPadGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a one-hot tensor. + /// + /// + /// + /// The locations represented by indices in `indices` take value `on_value`, + /// while all other locations take value `off_value`. + /// + /// If the input `indices` is rank `N`, the output will have rank `N+1`, + /// The new axis is created at dimension `axis` (default: the new axis is + /// appended at the end). + /// + /// If `indices` is a scalar the output shape will be a vector of length `depth`. 
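+ ///
+ /// As a quick C# sketch of driving this wrapper for the scalar case (the
+ /// enclosing generated-ops class is assumed to be `gen_array_ops` here;
+ /// adjust to whatever class this file is generated into):
+ ///
+ /// ```
+ /// var indices = tf.constant(2);            // scalar index
+ /// var depth = tf.constant(4);
+ /// var on_value = tf.constant(1.0f);
+ /// var off_value = tf.constant(0.0f);
+ /// // scalar indices -> a vector of length `depth`: [0, 0, 1, 0]
+ /// var output = gen_array_ops.one_hot(indices, depth, on_value, off_value);
+ /// ```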
+ /// + /// If `indices` is a vector of length `features`, the output shape will be: + /// ``` + /// features x depth if axis == -1 + /// depth x features if axis == 0 + /// ``` + /// + /// If `indices` is a matrix (batch) with shape `[batch, features]`, + /// the output shape will be: + /// ``` + /// batch x features x depth if axis == -1 + /// batch x depth x features if axis == 1 + /// depth x batch x features if axis == 0 + /// ``` + /// + /// + /// Examples + /// ========= + /// + /// Suppose that + /// ``` + /// indices = [0, 2, -1, 1] + /// depth = 3 + /// on_value = 5.0 + /// off_value = 0.0 + /// axis = -1 + /// ``` + /// + /// Then output is `[4 x 3]`: + /// ``` + /// output = + /// [5.0 0.0 0.0] // one_hot(0) + /// [0.0 0.0 5.0] // one_hot(2) + /// [0.0 0.0 0.0] // one_hot(-1) + /// [0.0 5.0 0.0] // one_hot(1) + /// ``` + /// + /// Suppose that + /// ``` + /// indices = [0, 2, -1, 1] + /// depth = 3 + /// on_value = 0.0 + /// off_value = 3.0 + /// axis = 0 + /// ``` + /// + /// Then output is `[3 x 4]`: + /// ``` + /// output = + /// [0.0 3.0 3.0 3.0] + /// [3.0 3.0 3.0 0.0] + /// [3.0 3.0 3.0 3.0] + /// [3.0 0.0 3.0 3.0] + /// // ^ one_hot(0) + /// // ^ one_hot(2) + /// // ^ one_hot(-1) + /// // ^ one_hot(1) + /// ``` + /// + /// Suppose that + /// ``` + /// indices = [[0, 2], [1, -1]] + /// depth = 3 + /// on_value = 1.0 + /// off_value = 0.0 + /// axis = -1 + /// ``` + /// + /// Then output is `[2 x 2 x 3]`: + /// ``` + /// output = + /// [ + /// [1.0, 0.0, 0.0] // one_hot(0) + /// [0.0, 0.0, 1.0] // one_hot(2) + /// ][ + /// [0.0, 1.0, 0.0] // one_hot(1) + /// [0.0, 0.0, 0.0] // one_hot(-1) + /// ] + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// + /// The axis to fill (default: -1, a new inner-most axis). + /// + /// + /// + public static Tensor one_hot(Tensor indices, Tensor depth, Tensor on_value, Tensor off_value, int axis = -1, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "OneHot", name) { args = new object[] { indices, depth, on_value, off_value }, attrs = new Dictionary() { ["axis"] = axis } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return one_hot_eager_fallback(indices, depth, on_value, off_value, axis: axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["indices"] = indices; + keywords["depth"] = depth; + keywords["on_value"] = on_value; + keywords["off_value"] = off_value; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("OneHot", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "axis", _op._get_attr_int("axis"), "T", _op._get_attr_type("T"), "TI", _op._get_attr_type("TI") }; + _execute.record_gradient("OneHot", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor one_hot_eager_fallback(Tensor indices, Tensor depth, Tensor on_value, Tensor off_value, int axis, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { indices, depth, on_value, off_value }; + object[] _attrs = new object[] { "axis", axis, "T", on_value.dtype, "TI", indices.dtype }; + var _result = _execute.execute("OneHot", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("OneHot", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a tensor of ones with the same shape and type as x. + /// + /// + /// + public static Tensor ones_like(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "OnesLike", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return ones_like_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("OnesLike", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("OnesLike", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor ones_like_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("OnesLike", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("OnesLike", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. + /// + /// + /// + /// Packs the `N` tensors in `values` into a tensor with rank one higher than each + /// tensor in `values`, by packing them along the `axis` dimension. + /// Given a list of tensors of shape `(A, B, C)`; + /// + /// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. + /// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. + /// Etc. 
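+ ///
+ /// From the C# side this maps onto a call roughly like the following sketch
+ /// (the generated-ops class name `gen_array_ops` and the `Tensors(...)`
+ /// construction are assumptions made for illustration):
+ ///
+ /// ```
+ /// var x = tf.constant(new[] { 1, 4 });
+ /// var y = tf.constant(new[] { 2, 5 });
+ /// var z = tf.constant(new[] { 3, 6 });
+ /// // pack along the first dimension -> shape (3, 2), matching the example below
+ /// var packed = gen_array_ops.pack(new Tensors(x, y, z), axis: 0);
+ /// ```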
+ /// + /// For example: + /// + /// ``` + /// # 'x' is [1, 4] + /// # 'y' is [2, 5] + /// # 'z' is [3, 6] + /// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + /// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] + /// ``` + /// + /// This is the opposite of `unpack`. + /// + /// + /// + /// + /// + /// Dimension along which to pack. Negative values wrap around, so the + /// valid range is `[-(R+1), R+1)`. + /// + /// + /// + public static Tensor pack(Tensors values, int axis = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pack", name) { args = new object[] { values }, attrs = new Dictionary() { ["axis"] = axis } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return pack_eager_fallback(values, axis: axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["values"] = values; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("Pack", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "axis", _op._get_attr_int("axis") }; + _execute.record_gradient("Pack", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor pack_eager_fallback(Tensors values, int axis, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.AddRange(values); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", values.Length, "T", values.dtype, "axis", axis }; + var _result = _execute.execute("Pack", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Pack", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Pads a tensor with zeros. + /// + /// + /// + /// This operation pads a `input` with zeros according to the `paddings` you + /// specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the + /// rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + /// how many zeros to add before the contents of `input` in that dimension, and + /// `paddings[D, 1]` indicates how many zeros to add after the contents of `input` + /// in that dimension. + /// + /// The padded size of each dimension D of the output is: + /// + /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + /// + /// For example: + /// + /// ``` + /// # 't' is [[1, 1], [2, 2]] + /// # 'paddings' is [[1, 1], [2, 2]] + /// # rank of 't' is 2 + /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + /// [0, 0, 1, 1, 0, 0] + /// [0, 0, 2, 2, 0, 0] + /// [0, 0, 0, 0, 0, 0]] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor pad(Tensor input, Tensor paddings, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pad", name) { args = new object[] { input, paddings }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return pad_eager_fallback(input, paddings, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["paddings"] = paddings; + var _op = tf.OpDefLib._apply_op_helper("Pad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings") }; + _execute.record_gradient("Pad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor pad_eager_fallback(Tensor input, Tensor paddings, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, paddings }; + object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype }; + var _result = _execute.execute("Pad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Pad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Pads a tensor. + /// + /// + /// + /// This operation pads `input` according to the `paddings` and `constant_values` + /// you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is + /// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + /// how many padding values to add before the contents of `input` in that dimension, + /// and `paddings[D, 1]` indicates how many padding values to add after the contents + /// of `input` in that dimension. `constant_values` is a scalar tensor of the same + /// type as `input` that indicates the value to use for padding `input`. + /// + /// The padded size of each dimension D of the output is: + /// + /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + /// + /// For example: + /// + /// ``` + /// # 't' is [[1, 1], [2, 2]] + /// # 'paddings' is [[1, 1], [2, 2]] + /// # 'constant_values' is 0 + /// # rank of 't' is 2 + /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + /// [0, 0, 1, 1, 0, 0] + /// [0, 0, 2, 2, 0, 0] + /// [0, 0, 0, 0, 0, 0]] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor pad_v2(Tensor input, Tensor paddings, Tensor constant_values, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PadV2", name) { args = new object[] { input, paddings, constant_values }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return pad_v2_eager_fallback(input, paddings, constant_values, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["paddings"] = paddings; + keywords["constant_values"] = constant_values; + var _op = tf.OpDefLib._apply_op_helper("PadV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings") }; + _execute.record_gradient("PadV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor pad_v2_eager_fallback(Tensor input, Tensor paddings, Tensor constant_values, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, paddings, constant_values }; + object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype }; + var _result = _execute.execute("PadV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("PadV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Concatenates a list of `N` tensors along the first dimension. + /// + /// + /// + /// The input tensors are all required to have size 1 in the first dimension. + /// + /// For example: + /// + /// ``` + /// # 'x' is [[1, 4]] + /// # 'y' is [[2, 5]] + /// # 'z' is [[3, 6]] + /// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + /// ``` + /// + /// The difference between concat and parallel_concat is that concat requires all + /// of the inputs be computed before the operation will begin but doesn't require + /// that the input shapes be known during graph construction. Parallel concat + /// will copy pieces of the input into the output as they become available, in + /// some situations this can provide a performance benefit. + /// + /// + /// + /// + /// + /// the final shape of the result; should be equal to the shapes of any input + /// but with the number of input values in the first dimension. + /// + /// + /// + public static Tensor parallel_concat(Tensors values, Shape shape, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ParallelConcat", name) { args = new object[] { values }, attrs = new Dictionary() { ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return parallel_concat_eager_fallback(values, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["values"] = values; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("ParallelConcat", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "shape", _op.get_attr("shape") }; + _execute.record_gradient("ParallelConcat", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor parallel_concat_eager_fallback(Tensors values, Shape shape, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.AddRange(values); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", values.Length, "T", values.dtype, "shape", shape }; + var _result = _execute.execute("ParallelConcat", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ParallelConcat", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// A placeholder op for a value that will be fed into the computation. + /// + /// + /// + /// N.B. This operation will fail with an error if it is executed. It is + /// intended as a way to represent a value that will always be fed, and to + /// provide attrs that enable the fed value to be checked at runtime. + /// + /// + /// + /// + /// The type of elements in the tensor. + /// + /// + /// + /// + /// (Optional) The shape of the tensor. If the shape has 0 dimensions, the + /// shape is unconstrained. + /// + /// + /// + public static Tensor placeholder(TF_DataType dtype, Shape shape = null, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Placeholder", name) { args = new object[] { }, attrs = new Dictionary() { ["dtype"] = dtype, ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return placeholder_eager_fallback(dtype: dtype, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["dtype"] = dtype; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("Placeholder", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape") }; + _execute.record_gradient("Placeholder", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor placeholder_eager_fallback(TF_DataType dtype, Shape shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "dtype", dtype, "shape", shape }; + var _result = _execute.execute("Placeholder", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Placeholder", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// A placeholder op for a value that will be fed into the computation. + /// + /// + /// + /// N.B. This operation will fail with an error if it is executed. It is + /// intended as a way to represent a value that will always be fed, and to + /// provide attrs that enable the fed value to be checked at runtime. + /// + /// + /// + /// + /// The type of elements in the tensor. + /// + /// + /// + /// + /// The shape of the tensor. The shape can be any partially-specified + /// shape. To be unconstrained, pass in a shape with unknown rank. + /// + /// + /// + public static Tensor placeholder_v2(TF_DataType dtype, Shape shape, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PlaceholderV2", name) { args = new object[] { }, attrs = new Dictionary() { ["dtype"] = dtype, ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return placeholder_v2_eager_fallback(dtype: dtype, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["dtype"] = dtype; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("PlaceholderV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape") }; + _execute.record_gradient("PlaceholderV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor placeholder_v2_eager_fallback(TF_DataType dtype, Shape shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "dtype", dtype, "shape", shape }; + var _result = _execute.execute("PlaceholderV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("PlaceholderV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// A placeholder op that passes through `input` when its output is not fed. + /// + /// + /// + /// + /// The (possibly partial) shape of the tensor. + /// + /// + /// + public static Tensor placeholder_with_default(Tensor input, Shape shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PlaceholderWithDefault", name) { args = new object[] { input }, attrs = new Dictionary() { ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return placeholder_with_default_eager_fallback(input, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("PlaceholderWithDefault", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape") }; + _execute.record_gradient("PlaceholderWithDefault", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor placeholder_with_default_eager_fallback(Tensor input, Shape shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "dtype", input.dtype, "shape", shape }; + var _result = _execute.execute("PlaceholderWithDefault", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("PlaceholderWithDefault", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// An identity op that triggers an error if a gradient is requested. + /// + /// + /// + /// When executed in a graph, this op outputs its input tensor as-is. 
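+ ///
+ /// A minimal C# usage sketch (the generated-ops class name is assumed):
+ ///
+ /// ```
+ /// var x = tf.constant(new[] { 1.0f, 2.0f, 3.0f });
+ /// // forward value is identical to x; any attempt to differentiate through
+ /// // this node is expected to fail with the supplied message
+ /// var y = gen_array_ops.prevent_gradient(x, message: "gradient intentionally blocked");
+ /// ```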
+ /// + /// When building ops to compute gradients, the TensorFlow gradient system + /// will return an error when trying to lookup the gradient of this op, + /// because no gradient must ever be registered for this function. This + /// op exists to prevent subtle bugs from silently returning unimplemented + /// gradients in some corner cases. + /// + /// + /// + /// + /// + /// Will be printed in the error when anyone tries to differentiate + /// this operation. + /// + /// + /// + public static Tensor prevent_gradient(Tensor input, string message = "", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PreventGradient", name) { args = new object[] { input }, attrs = new Dictionary() { ["message"] = message } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return prevent_gradient_eager_fallback(input, message: message, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (message is null) + { + message = ""; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["message"] = message; + var _op = tf.OpDefLib._apply_op_helper("PreventGradient", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "message", _op.get_attr("message") }; + _execute.record_gradient("PreventGradient", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor prevent_gradient_eager_fallback(Tensor input, string message, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "message", message }; + var _result = _execute.execute("PreventGradient", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("PreventGradient", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Use QuantizeAndDequantizeV2 instead. + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor quantize_and_dequantize(Tensor input, bool signed_input = true, int num_bits = 8, bool range_given = false, float input_min = 0f, float input_max = 0f, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantize", name) { args = new object[] { input }, attrs = new Dictionary() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["input_min"] = input_min, ["input_max"] = input_max } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return quantize_and_dequantize_eager_fallback(input, signed_input: signed_input, num_bits: num_bits, range_given: range_given, input_min: input_min, input_max: input_max, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["signed_input"] = signed_input; + keywords["num_bits"] = num_bits; + keywords["range_given"] = range_given; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + var _op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "signed_input", _op._get_attr_bool("signed_input"), "num_bits", _op._get_attr_int("num_bits"), "range_given", _op._get_attr_bool("range_given"), "input_min", _op.get_attr("input_min"), "input_max", _op.get_attr("input_max"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("QuantizeAndDequantize", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor quantize_and_dequantize_eager_fallback(Tensor input, bool signed_input, int num_bits, bool range_given, float input_min, float input_max, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "signed_input", signed_input, "num_bits", num_bits, "range_given", range_given, "input_min", input_min, "input_max", input_max, "T", input.dtype }; + var _result = _execute.execute("QuantizeAndDequantize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizeAndDequantize", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Quantizes then dequantizes a tensor. + /// + /// + /// + /// This op simulates the precision loss from the quantized forward pass by: + /// + /// 1. Quantizing the tensor to fixed point numbers, which should match the target + /// quantization method when it is used in inference. + /// 2. Dequantizing it back to floating point numbers for the following ops, most + /// likely matmul. + /// + /// There are different ways to quantize. This version uses only scaling, so 0.0 + /// maps to 0. + /// + /// From the specified 'num_bits' in the quantized output type, it determines + /// minimum and maximum representable quantized values. + /// + /// e.g. + /// + /// * [-128, 127] for signed, num_bits = 8, or + /// * [0, 255] for unsigned, num_bits = 8. + /// + /// If range_given == False, the initial input_min, input_max will be determined + /// automatically as the minimum and maximum values in the input tensor, otherwise + /// the specified values of input_min, input_max are used. + /// + /// Note: If the input_min, input_max are specified, they do not need to equal the + /// actual minimum and maximum values in the tensor. e.g. in some cases it may be + /// beneficial to specify these values such that the low probability extremes of the + /// input distribution are clipped. 
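+ ///
+ /// For instance, a hedged C# sketch of the range_given case (the generated-ops
+ /// class name is assumed):
+ ///
+ /// ```
+ /// var input = tf.constant(new[] { -12.0f, -4.0f, 0.0f, 3.0f, 8.0f });
+ /// var input_min = tf.constant(-10.0f);
+ /// var input_max = tf.constant(5.0f);
+ /// // values outside [-10, 5] are clamped before the quantize/dequantize round trip
+ /// var output = gen_array_ops.quantize_and_dequantize_v2(input, input_min, input_max,
+ ///     signed_input: true, num_bits: 8, range_given: true);
+ /// ```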
+ /// + /// This op determines the maximum scale_factor that would map the initial + /// [input_min, input_max] range to a range that lies within the representable + /// quantized range. + /// + /// It determines the scale from one of input_min and input_max, then updates the + /// other one to maximize the representable range. + /// + /// e.g. + /// + /// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0, + /// 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8 In this case, it + /// would update input_max to be 127 / 12.8 = 9.921875 + /// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0, + /// 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7 In this case, it + /// would update input_min to be 128.0 / 12.7 = -10.07874 + /// * if the output is unsigned, input_min is forced to be 0, and only the + /// specified input_max is used. + /// + /// After determining the scale_factor and updating the input range, it applies the + /// following to each value in the 'input' tensor. + /// + /// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor. + /// + /// The above round function rounds the value based on the given round_mode. + /// + /// + /// + /// + /// + /// + /// + /// + /// Whether the quantization is signed or unsigned. (actually this parameter should + /// have been called `signed_output`) + /// + /// + /// + /// + /// The bitwidth of the quantization. + /// + /// + /// + /// + /// Whether the range is given or should be determined from the `input` tensor. + /// + /// + /// + /// + /// The 'round_mode' attribute controls which rounding tie-breaking algorithm is + /// used when rounding float values to their quantized equivalents. The following + /// rounding modes are currently supported: + /// + /// * HALF_TO_EVEN: this is the default round_mode. + /// * HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5 + /// rounds up to -7. + /// + /// + /// + /// + /// + /// If True, then the absolute value of the quantized minimum value is the same as + /// the quantized maximum value, instead of 1 greater. + /// i.e. for 8 bit quantization, the minimum value is -127 instead of -128. + /// + /// + /// + /// + /// If specified, this axis is treated as a channel or slice axis, and a separate + /// quantization range is used for each channel or slice along this axis. + /// + /// + /// + public static Tensor quantize_and_dequantize_v2(Tensor input, Tensor input_min, Tensor input_max, bool signed_input = true, int num_bits = 8, bool range_given = false, string round_mode = "HALF_TO_EVEN", bool narrow_range = false, int axis = -1, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV2", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return quantize_and_dequantize_v2_eager_fallback(input, input_min, input_max, signed_input: signed_input, num_bits: num_bits, range_given: range_given, round_mode: round_mode, narrow_range: narrow_range, axis: axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (round_mode is null) + { + round_mode = "HALF_TO_EVEN"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["signed_input"] = signed_input; + keywords["num_bits"] = num_bits; + keywords["range_given"] = range_given; + keywords["round_mode"] = round_mode; + keywords["narrow_range"] = narrow_range; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantizeV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "signed_input", _op._get_attr_bool("signed_input"), "num_bits", _op._get_attr_int("num_bits"), "range_given", _op._get_attr_bool("range_given"), "T", _op._get_attr_type("T"), "round_mode", _op.get_attr("round_mode"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis") }; + _execute.record_gradient("QuantizeAndDequantizeV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor quantize_and_dequantize_v2_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, bool signed_input, int num_bits, bool range_given, string round_mode, bool narrow_range, int axis, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max }; + object[] _attrs = new object[] { "signed_input", signed_input, "num_bits", num_bits, "range_given", range_given, "T", input.dtype, "round_mode", round_mode, "narrow_range", narrow_range, "axis", axis }; + var _result = _execute.execute("QuantizeAndDequantizeV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizeAndDequantizeV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Quantizes then dequantizes a tensor. + /// + /// + /// + /// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a + /// tensor, so its value can change during training. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor quantize_and_dequantize_v3(Tensor input, Tensor input_min, Tensor input_max, Tensor num_bits, bool signed_input = true, bool range_given = true, bool narrow_range = false, int axis = -1, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV3", name) { args = new object[] { input, input_min, input_max, num_bits }, attrs = new Dictionary() { ["signed_input"] = signed_input, ["range_given"] = range_given, ["narrow_range"] = narrow_range, ["axis"] = axis } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return quantize_and_dequantize_v3_eager_fallback(input, input_min, input_max, num_bits, signed_input: signed_input, range_given: range_given, narrow_range: narrow_range, axis: axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["num_bits"] = num_bits; + keywords["signed_input"] = signed_input; + keywords["range_given"] = range_given; + keywords["narrow_range"] = narrow_range; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantizeV3", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "signed_input", _op._get_attr_bool("signed_input"), "range_given", _op._get_attr_bool("range_given"), "T", _op._get_attr_type("T"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis") }; + _execute.record_gradient("QuantizeAndDequantizeV3", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor quantize_and_dequantize_v3_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, Tensor num_bits, bool signed_input, bool range_given, bool narrow_range, int axis, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max, num_bits }; + object[] _attrs = new object[] { "signed_input", signed_input, "range_given", range_given, "T", input.dtype, "narrow_range", narrow_range, "axis", axis }; + var _result = _execute.execute("QuantizeAndDequantizeV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizeAndDequantizeV3", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Quantizes then dequantizes a tensor. + /// + /// + /// + /// This is almost identical to QuantizeAndDequantizeV2, except that it returns a + /// gradient of 1 for inputs that are within the quantization range, or 0 otherwise. + /// + /// + /// + /// + /// + /// + /// + /// Whether the quantization is signed or unsigned. (actually this parameter should + /// have been called `signed_output`) + /// + /// + /// + /// + /// The bitwidth of the quantization. + /// + /// + /// + /// + /// Whether the range is given or should be determined from the `input` tensor. + /// + /// + /// + /// + /// The 'round_mode' attribute controls which rounding tie-breaking algorithm is + /// used when rounding float values to their quantized equivalents. The following + /// rounding modes are currently supported: + /// + /// * HALF_TO_EVEN: this is the default round_mode. + /// * HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5 + /// rounds up to -7. + /// + /// + /// + /// + /// + /// If True, then the absolute value of the quantized minimum value is the same as + /// the quantized maximum value, instead of 1 greater. + /// i.e. 
for 8 bit quantization, the minimum value is -127 instead of -128. + /// + /// + /// + /// + /// If specified, this axis is treated as a channel or slice axis, and a separate + /// quantization range is used for each channel or slice along this axis. + /// + /// + /// + public static Tensor quantize_and_dequantize_v4(Tensor input, Tensor input_min, Tensor input_max, bool signed_input = true, int num_bits = 8, bool range_given = false, string round_mode = "HALF_TO_EVEN", bool narrow_range = false, int axis = -1, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV4", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return quantize_and_dequantize_v4_eager_fallback(input, input_min, input_max, signed_input: signed_input, num_bits: num_bits, range_given: range_given, round_mode: round_mode, narrow_range: narrow_range, axis: axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (round_mode is null) + { + round_mode = "HALF_TO_EVEN"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["signed_input"] = signed_input; + keywords["num_bits"] = num_bits; + keywords["range_given"] = range_given; + keywords["round_mode"] = round_mode; + keywords["narrow_range"] = narrow_range; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantizeV4", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "signed_input", _op._get_attr_bool("signed_input"), "num_bits", _op._get_attr_int("num_bits"), "range_given", _op._get_attr_bool("range_given"), "T", _op._get_attr_type("T"), "round_mode", _op.get_attr("round_mode"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis") }; + _execute.record_gradient("QuantizeAndDequantizeV4", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor quantize_and_dequantize_v4_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, bool signed_input, int num_bits, bool range_given, string round_mode, bool narrow_range, int axis, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max }; + object[] _attrs = new object[] { "signed_input", signed_input, "num_bits", num_bits, "range_given", range_given, "T", input.dtype, "round_mode", round_mode, "narrow_range", narrow_range, "axis", axis }; + var _result = _execute.execute("QuantizeAndDequantizeV4", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizeAndDequantizeV4", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. + /// + /// + /// + /// [min_range, max_range] are scalar floats that specify the range for + /// the 'input' data. The 'mode' attribute controls exactly which calculations are + /// used to convert the float values to their quantized equivalents. 
The + /// 'round_mode' attribute controls which rounding tie-breaking algorithm is used + /// when rounding float values to their quantized equivalents. + /// + /// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + /// + /// ``` + /// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) + /// if T == qint8: out[i] -= (range(T) + 1) / 2.0 + /// ``` + /// + /// here `range(T) = numeric_limits::max() - numeric_limits::min()` + /// + /// *MIN_COMBINED Mode Example* + /// + /// Assume the input is type float and has a possible range of [0.0, 6.0] and the + /// output type is quint8 ([0, 255]). The min_range and max_range values should be + /// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each + /// value of the input by 255/6 and cast to quint8. + /// + /// If the output type was qint8 ([-128, 127]), the operation will additionally + /// subtract each value by 128 prior to casting, so that the range of values aligns + /// with the range of qint8. + /// + /// If the mode is 'MIN_FIRST', then this approach is used: + /// + /// ``` + /// num_discrete_values = 1 << (# of bits in T) + /// range_adjust = num_discrete_values / (num_discrete_values - 1) + /// range = (range_max - range_min) * range_adjust + /// range_scale = num_discrete_values / range + /// quantized = round(input * range_scale) - round(range_min * range_scale) + + /// numeric_limits::min() + /// quantized = max(quantized, numeric_limits::min()) + /// quantized = min(quantized, numeric_limits::max()) + /// ``` + /// + /// The biggest difference between this and MIN_COMBINED is that the minimum range + /// is rounded first, before it's subtracted from the rounded value. With + /// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing + /// and dequantizing will introduce a larger and larger error. + /// + /// *SCALED mode Example* + /// + /// `SCALED` mode matches the quantization approach used in + /// `QuantizeAndDequantize{V2|V3}`. + /// + /// If the mode is `SCALED`, the quantization is performed by multiplying each + /// input value by a scaling_factor. + /// The scaling_factor is determined from `min_range` and `max_range` to be as large + /// as possible such that the range from `min_range` to `max_range` is representable + /// within values of type T. + /// + /// ```c++ + /// + /// const int min_T = std::numeric_limits::min(); + /// const int max_T = std::numeric_limits::max(); + /// const float max_float = std::numeric_limits::max(); + /// + /// const float scale_factor_from_min_side = + /// (min_T * min_range > 0) ? min_T / min_range : max_float; + /// const float scale_factor_from_max_side = + /// (max_T * max_range > 0) ? max_T / max_range : max_float; + /// + /// const float scale_factor = std::min(scale_factor_from_min_side, + /// scale_factor_from_max_side); + /// ``` + /// + /// We next use the scale_factor to adjust min_range and max_range as follows: + /// + /// ```c++ + /// min_range = min_T / scale_factor; + /// max_range = max_T / scale_factor; + /// ``` + /// + /// + /// e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would + /// compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 + /// In this case, min_range would remain -10, but max_range would be adjusted to + /// 127 / 12.8 = 9.921875 + /// + /// So we will quantize input values in the range (-10, 9.921875) to (-128, 127). 
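+ /// (Consistency check: 12.8 * 9.921875 = 127 and 12.8 * (-10) = -128, so the adjusted
+ /// range maps exactly onto the qint8 limits.)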
+ /// + /// The input tensor can now be quantized by clipping values to the range + /// `min_range` to `max_range`, then multiplying by scale_factor as follows: + /// + /// ```c++ + /// result = round(min(max_range, max(min_range, input)) * scale_factor) + /// ``` + /// + /// The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of + /// this operation. These outputs should be used as the range for any further + /// calculations. + /// + /// + /// *narrow_range (bool) attribute* + /// + /// If true, we do not use the minimum quantized value. + /// i.e. for int8 the quantized output, it would be restricted to the range + /// -127..127 instead of the full -128..127 range. + /// This is provided for compatibility with certain inference backends. + /// (Only applies to SCALED mode) + /// + /// + /// *axis (int) attribute* + /// + /// An optional `axis` attribute can specify a dimension index of the input tensor, + /// such that quantization ranges will be calculated and applied separately for each + /// slice of the tensor along that dimension. This is useful for per-channel + /// quantization. + /// + /// If axis is specified, min_range and max_range + /// + /// if `axis`=None, per-tensor quantization is performed as normal. + /// + /// + /// *ensure_minimum_range (float) attribute* + /// + /// Ensures the minimum quantization range is at least this value. + /// The legacy default value for this is 0.01, but it is strongly suggested to + /// set it to 0 for new uses. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantize_v2(Tensor input, Tensor min_range, Tensor max_range, TF_DataType T, string mode = "MIN_COMBINED", string round_mode = "HALF_AWAY_FROM_ZERO", bool narrow_range = false, int axis = -1, float ensure_minimum_range = 0.01f, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeV2", name) { args = new object[] { input, min_range, max_range }, attrs = new Dictionary() { ["T"] = T, ["mode"] = mode, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis, ["ensure_minimum_range"] = ensure_minimum_range } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantize_v2_eager_fallback(input, min_range, max_range, T: T, mode: mode, round_mode: round_mode, narrow_range: narrow_range, axis: axis, ensure_minimum_range: ensure_minimum_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (mode is null) + { + mode = "MIN_COMBINED"; + } + if (round_mode is null) + { + round_mode = "HALF_AWAY_FROM_ZERO"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["min_range"] = min_range; + keywords["max_range"] = max_range; + keywords["T"] = T; + keywords["mode"] = mode; + keywords["round_mode"] = round_mode; + keywords["narrow_range"] = narrow_range; + keywords["axis"] = axis; + keywords["ensure_minimum_range"] = ensure_minimum_range; + var _op = tf.OpDefLib._apply_op_helper("QuantizeV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"), "round_mode", _op.get_attr("round_mode"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis"), "ensure_minimum_range", _op.get_attr("ensure_minimum_range") }; + _execute.record_gradient("QuantizeV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantize_v2_eager_fallback(Tensor input, Tensor min_range, Tensor max_range, TF_DataType T, string mode, string round_mode, bool narrow_range, int axis, float ensure_minimum_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, min_range, max_range }; + object[] _attrs = new object[] { "T", T, "mode", mode, "round_mode", round_mode, "narrow_range", narrow_range, "axis", axis, "ensure_minimum_range", ensure_minimum_range }; + var _result = _execute.execute("QuantizeV2", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizeV2", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Concatenates quantized tensors along one dimension. + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_concat(Tensor concat_dim, Tensors values, Tensors input_mins, Tensors input_maxes, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConcat", name) { args = new object[] { concat_dim, values, input_mins, input_maxes }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_concat_eager_fallback(concat_dim, values, input_mins, input_maxes, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["concat_dim"] = concat_dim; + keywords["values"] = values; + keywords["input_mins"] = input_mins; + keywords["input_maxes"] = input_maxes; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConcat", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("QuantizedConcat", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_concat_eager_fallback(Tensor concat_dim, Tensors values, Tensors input_mins, Tensors input_maxes, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.Add(concat_dim); + _inputs_flat_list.AddRange(values); + _inputs_flat_list.AddRange(input_mins); + _inputs_flat_list.AddRange(input_maxes); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", values.Length, "T", values.dtype }; + var _result = _execute.execute("QuantizedConcat", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConcat", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Quantized Instance normalization. + /// + /// + /// + /// + /// + /// + /// If True, `given_y_min` and `given_y_min` + /// and `given_y_max` are used as the output range. Otherwise, + /// the implementation computes the output range. + /// + /// + /// + /// + /// Output in `y_min` if `output_range_given` is True. + /// + /// + /// + /// + /// Output in `y_max` if `output_range_given` is True. + /// + /// + /// + /// + /// A small float number to avoid dividing by 0. + /// + /// + /// + /// + /// Minimum value of `y_max - y_min` + /// + /// + /// + public static Tensor[] quantized_instance_norm(Tensor x, Tensor x_min, Tensor x_max, bool output_range_given = false, float given_y_min = 0f, float given_y_max = 0f, float variance_epsilon = 1E-05f, float min_separation = 0.001f, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedInstanceNorm", name) { args = new object[] { x, x_min, x_max }, attrs = new Dictionary() { ["output_range_given"] = output_range_given, ["given_y_min"] = given_y_min, ["given_y_max"] = given_y_max, ["variance_epsilon"] = variance_epsilon, ["min_separation"] = min_separation } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_instance_norm_eager_fallback(x, x_min, x_max, output_range_given: output_range_given, given_y_min: given_y_min, given_y_max: given_y_max, variance_epsilon: variance_epsilon, min_separation: min_separation, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["x_min"] = x_min; + keywords["x_max"] = x_max; + keywords["output_range_given"] = output_range_given; + keywords["given_y_min"] = given_y_min; + keywords["given_y_max"] = given_y_max; + keywords["variance_epsilon"] = variance_epsilon; + keywords["min_separation"] = min_separation; + var _op = tf.OpDefLib._apply_op_helper("QuantizedInstanceNorm", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "output_range_given", _op._get_attr_bool("output_range_given"), "given_y_min", _op.get_attr("given_y_min"), "given_y_max", _op.get_attr("given_y_max"), "variance_epsilon", _op.get_attr("variance_epsilon"), "min_separation", _op.get_attr("min_separation") }; + _execute.record_gradient("QuantizedInstanceNorm", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_instance_norm_eager_fallback(Tensor x, Tensor x_min, Tensor x_max, bool output_range_given, float given_y_min, float given_y_max, float variance_epsilon, float min_separation, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, x_min, x_max }; + object[] _attrs = new object[] { "T", x.dtype, "output_range_given", output_range_given, "given_y_min", given_y_min, "given_y_max", given_y_max, "variance_epsilon", variance_epsilon, "min_separation", min_separation }; + var _result = _execute.execute("QuantizedInstanceNorm", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedInstanceNorm", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Reshapes a quantized tensor as per the Reshape op. + /// + /// + /// + /// ``` + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_reshape(Tensor tensor, Tensor shape, Tensor input_min, Tensor input_max, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedReshape", name) { args = new object[] { tensor, shape, input_min, input_max }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_reshape_eager_fallback(tensor, shape, input_min, input_max, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["shape"] = shape; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + var _op = tf.OpDefLib._apply_op_helper("QuantizedReshape", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tshape", _op._get_attr_type("Tshape") }; + _execute.record_gradient("QuantizedReshape", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_reshape_eager_fallback(Tensor tensor, Tensor shape, Tensor input_min, Tensor input_max, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, shape, input_min, input_max }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tshape", shape.dtype }; + var _result = _execute.execute("QuantizedReshape", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedReshape", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Returns the rank of a tensor. + /// + /// + /// + /// This operation returns an integer representing the rank of `input`. + /// + /// For example: + /// + /// ``` + /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + /// # shape of tensor 't' is [2, 2, 3] + /// rank(t) ==> 3 + /// ``` + /// + /// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank + /// of a tensor is the number of indices required to uniquely select each element + /// of the tensor. Rank is also known as "order", "degree", or "ndims." + /// + /// + /// + /// + public static Tensor rank(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rank", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return rank_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("Rank", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Rank", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor rank_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("Rank", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Rank", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Return the same ref tensor as the input ref tensor. 
+ /// + /// + /// + public static Tensor ref_identity(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("ref_identity op does not support eager execution. Arg input is a ref."); + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("RefIdentity", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("RefIdentity", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor ref_identity_eager_fallback(Tensor input, string name, Context ctx) + { + throw new RuntimeError($"ref_identity op does not support eager execution. Arg 'input' is a ref."); + } + /// + /// Reshapes a tensor. + /// + /// + /// + /// Given `tensor`, this operation returns a tensor that has the same values + /// as `tensor` with shape `shape`. + /// + /// If one component of 1-D tensor `shape` is the special value -1, the size of that + /// dimension is computed so that the total size remains constant. In particular, a + /// `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be + /// unknown. + /// + /// The `shape` must be 1-D and the operation returns a tensor with shape + /// `shape` filled with the values of `tensor`. In this case, the number of elements + /// implied by `shape` must be the same as the number of elements in `tensor`. + /// + /// It is an error if `shape` is not 1-D. + /// + /// For example: + /// + /// ``` + /// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] + /// # tensor 't' has shape [9] + /// reshape(t, [3, 3]) ==> [[1, 2, 3], + /// [4, 5, 6], + /// [7, 8, 9]] + /// + /// # tensor 't' is [[[1, 1], [2, 2]], + /// # [[3, 3], [4, 4]]] + /// # tensor 't' has shape [2, 2, 2] + /// reshape(t, [2, 4]) ==> [[1, 1, 2, 2], + /// [3, 3, 4, 4]] + /// + /// # tensor 't' is [[[1, 1, 1], + /// # [2, 2, 2]], + /// # [[3, 3, 3], + /// # [4, 4, 4]], + /// # [[5, 5, 5], + /// # [6, 6, 6]]] + /// # tensor 't' has shape [3, 2, 3] + /// # pass '[-1]' to flatten 't' + /// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] + /// + /// # -1 can also be used to infer the shape + /// + /// # -1 is inferred to be 9: + /// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + /// [4, 4, 4, 5, 5, 5, 6, 6, 6]] + /// # -1 is inferred to be 2: + /// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + /// [4, 4, 4, 5, 5, 5, 6, 6, 6]] + /// # -1 is inferred to be 3: + /// reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], + /// [2, 2, 2], + /// [3, 3, 3]], + /// [[4, 4, 4], + /// [5, 5, 5], + /// [6, 6, 6]]] + /// + /// # tensor 't' is [7] + /// # shape `[]` reshapes to a scalar + /// reshape(t, []) ==> 7 + /// ``` + /// + /// + /// + /// + /// + public static Tensor reshape(Tensor tensor, Tensor shape, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reshape", name) { args = new object[] { tensor, shape }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reshape_eager_fallback(tensor, shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("Reshape", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tshape", _op._get_attr_type("Tshape") }; + _execute.record_gradient("Reshape", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reshape_eager_fallback(Tensor tensor, Tensor shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, shape }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tshape", shape.dtype }; + var _result = _execute.execute("Reshape", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Reshape", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Assign `value` to the sliced l-value reference of `ref`. + /// + /// + /// + /// The values of `value` are assigned to the positions in the variable + /// `ref` that are selected by the slice parameters. The slice parameters + /// `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. + /// + /// NOTE this op currently does not support broadcasting and so `value`'s + /// shape must be exactly the shape produced by the slice of `ref`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Operation resource_strided_slice_assign(Tensor ref_, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceStridedSliceAssign", name) { args = new object[] { ref_, begin, end, strides, value }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } }); + return null; + } + catch (Exception) + { + } + try + { + return resource_strided_slice_assign_eager_fallback(ref_, begin, end, strides, value, begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["ref"] = ref_; + keywords["begin"] = begin; + keywords["end"] = end; + keywords["strides"] = strides; + keywords["value"] = value; + keywords["begin_mask"] = begin_mask; + keywords["end_mask"] = end_mask; + keywords["ellipsis_mask"] = ellipsis_mask; + keywords["new_axis_mask"] = new_axis_mask; + keywords["shrink_axis_mask"] = shrink_axis_mask; + var _op = tf.OpDefLib._apply_op_helper("ResourceStridedSliceAssign", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index"), "begin_mask", _op._get_attr_int("begin_mask"), "end_mask", _op._get_attr_int("end_mask"), "ellipsis_mask", _op._get_attr_int("ellipsis_mask"), "new_axis_mask", _op._get_attr_int("new_axis_mask"), "shrink_axis_mask", _op._get_attr_int("shrink_axis_mask") }; + _execute.record_gradient("ResourceStridedSliceAssign", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Operation resource_strided_slice_assign_eager_fallback(Tensor ref_, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask, int end_mask, int ellipsis_mask, int new_axis_mask, int shrink_axis_mask, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { ref_, begin, end, strides, value }; + object[] _attrs = new object[] { "T", value.dtype, "Index", begin.dtype, "begin_mask", begin_mask, "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask }; + var _result = _execute.execute("ResourceStridedSliceAssign", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ResourceStridedSliceAssign", _inputs_flat, _attrs, _result); + } + return null; + } + /// + /// Reverses specific dimensions of a tensor. + /// + /// + /// + /// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions + /// of `tensor`, this operation reverses each dimension i of `tensor` where + /// `dims[i]` is `True`. + /// + /// `tensor` can have up to 8 dimensions. The number of dimensions + /// of `tensor` must equal the number of elements in `dims`. 
In other words: + /// + /// `rank(tensor) = size(dims)` + /// + /// For example: + /// + /// ``` + /// # tensor 't' is [[[[ 0, 1, 2, 3], + /// # [ 4, 5, 6, 7], + /// # [ 8, 9, 10, 11]], + /// # [[12, 13, 14, 15], + /// # [16, 17, 18, 19], + /// # [20, 21, 22, 23]]]] + /// # tensor 't' shape is [1, 2, 3, 4] + /// + /// # 'dims' is [False, False, False, True] + /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + /// [ 7, 6, 5, 4], + /// [ 11, 10, 9, 8]], + /// [[15, 14, 13, 12], + /// [19, 18, 17, 16], + /// [23, 22, 21, 20]]]] + /// + /// # 'dims' is [False, True, False, False] + /// reverse(t, dims) ==> [[[[12, 13, 14, 15], + /// [16, 17, 18, 19], + /// [20, 21, 22, 23] + /// [[ 0, 1, 2, 3], + /// [ 4, 5, 6, 7], + /// [ 8, 9, 10, 11]]]] + /// + /// # 'dims' is [False, False, True, False] + /// reverse(t, dims) ==> [[[[8, 9, 10, 11], + /// [4, 5, 6, 7], + /// [0, 1, 2, 3]] + /// [[20, 21, 22, 23], + /// [16, 17, 18, 19], + /// [12, 13, 14, 15]]]] + /// ``` + /// + /// + /// + /// + /// + public static Tensor reverse(Tensor tensor, Tensor dims, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reverse", name) { args = new object[] { tensor, dims }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reverse_eager_fallback(tensor, dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["dims"] = dims; + var _op = tf.OpDefLib._apply_op_helper("Reverse", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Reverse", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reverse_eager_fallback(Tensor tensor, Tensor dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, dims }; + object[] _attrs = new object[] { "T", tensor.dtype }; + var _result = _execute.execute("Reverse", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Reverse", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Reverses variable length slices. + /// + /// + /// + /// This op first slices `input` along the dimension `batch_dim`, and for each + /// slice `i`, reverses the first `seq_lengths[i]` elements along + /// the dimension `seq_dim`. + /// + /// The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, + /// and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. + /// + /// The output slice `i` along dimension `batch_dim` is then given by input + /// slice `i`, with the first `seq_lengths[i]` slices along dimension + /// `seq_dim` reversed. + /// + /// For example: + /// + /// ``` + /// # Given this: + /// batch_dim = 0 + /// seq_dim = 1 + /// input.dims = (4, 8, ...) + /// seq_lengths = [7, 2, 3, 5] + /// + /// # then slices of input are reversed on seq_dim, but only up to seq_lengths: + /// output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] + /// output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] + /// output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] + /// output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] + /// + /// # while entries past seq_lens are copied through: + /// output[0, 7:, :, ...] = input[0, 7:, :, ...] 
+ /// output[1, 2:, :, ...] = input[1, 2:, :, ...] + /// output[2, 3:, :, ...] = input[2, 3:, :, ...] + /// output[3, 2:, :, ...] = input[3, 2:, :, ...] + /// ``` + /// + /// In contrast, if: + /// + /// ``` + /// # Given this: + /// batch_dim = 2 + /// seq_dim = 0 + /// input.dims = (8, ?, 4, ...) + /// seq_lengths = [7, 2, 3, 5] + /// + /// # then slices of input are reversed on seq_dim, but only up to seq_lengths: + /// output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] + /// output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] + /// output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] + /// output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] + /// + /// # while entries past seq_lens are copied through: + /// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] + /// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] + /// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] + /// output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] + /// ``` + /// + /// + /// + /// + /// + /// + /// The dimension which is partially reversed. + /// + /// + /// + /// + /// The dimension along which reversal is performed. + /// + /// + /// + public static Tensor reverse_sequence(Tensor input, Tensor seq_lengths, int seq_dim = 0, int batch_dim = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReverseSequence", name) { args = new object[] { input, seq_lengths }, attrs = new Dictionary() { ["seq_dim"] = seq_dim, ["batch_dim"] = batch_dim } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reverse_sequence_eager_fallback(input, seq_lengths, seq_dim: seq_dim, batch_dim: batch_dim, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["seq_lengths"] = seq_lengths; + keywords["seq_dim"] = seq_dim; + keywords["batch_dim"] = batch_dim; + var _op = tf.OpDefLib._apply_op_helper("ReverseSequence", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "seq_dim", _op._get_attr_int("seq_dim"), "batch_dim", _op._get_attr_int("batch_dim"), "T", _op._get_attr_type("T"), "Tlen", _op._get_attr_type("Tlen") }; + _execute.record_gradient("ReverseSequence", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reverse_sequence_eager_fallback(Tensor input, Tensor seq_lengths, int seq_dim, int batch_dim, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, seq_lengths }; + object[] _attrs = new object[] { "seq_dim", seq_dim, "batch_dim", batch_dim, "T", input.dtype, "Tlen", seq_lengths.dtype }; + var _result = _execute.execute("ReverseSequence", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReverseSequence", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Reverses specific dimensions of a tensor. + /// + /// + /// + /// Given a `tensor`, and a `int32` tensor `axis` representing the set of + /// dimensions of `tensor` to reverse. This operation reverses each dimension + /// `i` for which there exists `j` s.t. `axis[j] == i`. + /// + /// `tensor` can have up to 8 dimensions. The number of dimensions specified + /// in `axis` may be 0 or more entries. If an index is specified more than + /// once, a InvalidArgument error is raised. 
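+ ///
+ /// A minimal, illustrative C# call of this binding (the hosting generated-ops class is an
+ /// assumption, not part of this patch):
+ ///
+ /// ```csharp
+ /// // illustrative only
+ /// var t = tf.constant(new[] { 1, 2, 3, 4 });
+ /// var r = reverse_v2(t, tf.constant(new[] { 0 }));   // expected [4, 3, 2, 1]
+ /// ```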
+ /// + /// For example: + /// + /// ``` + /// # tensor 't' is [[[[ 0, 1, 2, 3], + /// # [ 4, 5, 6, 7], + /// # [ 8, 9, 10, 11]], + /// # [[12, 13, 14, 15], + /// # [16, 17, 18, 19], + /// # [20, 21, 22, 23]]]] + /// # tensor 't' shape is [1, 2, 3, 4] + /// + /// # 'dims' is [3] or 'dims' is [-1] + /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + /// [ 7, 6, 5, 4], + /// [ 11, 10, 9, 8]], + /// [[15, 14, 13, 12], + /// [19, 18, 17, 16], + /// [23, 22, 21, 20]]]] + /// + /// # 'dims' is '[1]' (or 'dims' is '[-3]') + /// reverse(t, dims) ==> [[[[12, 13, 14, 15], + /// [16, 17, 18, 19], + /// [20, 21, 22, 23] + /// [[ 0, 1, 2, 3], + /// [ 4, 5, 6, 7], + /// [ 8, 9, 10, 11]]]] + /// + /// # 'dims' is '[2]' (or 'dims' is '[-2]') + /// reverse(t, dims) ==> [[[[8, 9, 10, 11], + /// [4, 5, 6, 7], + /// [0, 1, 2, 3]] + /// [[20, 21, 22, 23], + /// [16, 17, 18, 19], + /// [12, 13, 14, 15]]]] + /// ``` + /// + /// + /// + /// + /// + public static Tensor reverse_v2(Tensor tensor, Tensor axis, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReverseV2", name) { args = new object[] { tensor, axis }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reverse_v2_eager_fallback(tensor, axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("ReverseV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tidx", _op._get_attr_type("Tidx"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("ReverseV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reverse_v2_eager_fallback(Tensor tensor, Tensor axis, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, axis }; + object[] _attrs = new object[] { "Tidx", axis.dtype, "T", tensor.dtype }; + var _result = _execute.execute("ReverseV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReverseV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Scatters `updates` into a tensor of shape `shape` according to `indices`. + /// + /// + /// + /// Scatter sparse `updates` according to individual values at the specified + /// `indices`. This op returns an output tensor with the `shape` you specify. This + /// op is the inverse of the `tf.gather_nd` operator which extracts values or slices + /// from a given tensor. + /// + /// This operation is similar to `tf.tensor_scatter_nd_add`, except that the tensor + /// is zero-initialized. Calling `tf.scatter_nd(indices, updates, shape)` + /// is identical to calling + /// `tf.tensor_scatter_nd_add(tf.zeros(shape, updates.dtype), indices, updates)` + /// + /// If `indices` contains duplicates, the associated `updates` are accumulated + /// (summed) into the output tensor. + /// + /// **WARNING**: For floating-point data types, the output may be nondeterministic. + /// This is because the order in which the updates are applied is nondeterministic + /// and when floating-point numbers are added in different orders the resulting + /// numerical approximation error can be slightly different. 
However, the output + /// will be deterministic if op determinism is enabled via + /// `tf.config.experimental.enable_op_determinism`. + /// + /// `indices` is an integer tensor containing indices into the output tensor. The + /// last dimension of `indices` can be at most the rank of `shape`: + /// + /// indices.shape[-1] <= shape.rank + /// + /// The last dimension of `indices` corresponds to indices of elements + /// (if `indices.shape[-1] = shape.rank`) or slices + /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + /// `shape`. + /// + /// `updates` is a tensor with shape: + /// + /// indices.shape[:-1] + shape[indices.shape[-1]:] + /// + /// The simplest form of the scatter op is to insert individual elements in + /// a tensor by index. Consider an example where you want to insert 4 scattered + /// elements in a rank-1 tensor with 8 elements. + /// + ///
+ /// (illustration omitted)
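+ ///
+ /// A rough equivalent through this generated C# binding (the hosting class is assumed)
+ /// could look like:
+ ///
+ /// ```csharp
+ /// // illustrative only
+ /// var indices = tf.constant(new[,] { { 4 }, { 3 }, { 1 }, { 7 } });
+ /// var updates = tf.constant(new[] { 9, 10, 11, 12 });
+ /// var shape = tf.constant(new[] { 8 });
+ /// var scatter = scatter_nd(indices, updates, shape);   // expected [0, 11, 0, 10, 9, 0, 0, 12]
+ /// ```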
+ /// + /// In Python, this scatter operation would look like this: + /// + /// ```python + /// indices = tf.constant([[4], [3], [1], [7]]) + /// updates = tf.constant([9, 10, 11, 12]) + /// shape = tf.constant([8]) + /// scatter = tf.scatter_nd(indices, updates, shape) + /// print(scatter) + /// ``` + /// + /// The resulting tensor would look like this: + /// + /// [0, 11, 0, 10, 9, 0, 0, 12] + /// + /// You can also insert entire slices of a higher rank tensor all at once. For + /// example, you can insert two slices in the first dimension of a rank-3 tensor + /// with two matrices of new values. + /// + ///
+ /// (illustration omitted)
+ /// + /// In Python, this scatter operation would look like this: + /// + /// ```python + /// indices = tf.constant([[1], [3]]) + /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + /// [7, 7, 7, 7], [8, 8, 8, 8]], + /// [[5, 5, 5, 5], [6, 6, 6, 6], + /// [7, 7, 7, 7], [8, 8, 8, 8]]]) + /// shape = tf.constant([4, 4, 4]) + /// scatter = tf.scatter_nd(indices, updates, shape) + /// print(scatter) + /// ``` + /// + /// The resulting tensor would look like this: + /// + /// [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + /// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + /// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + /// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]] + /// + /// Note that on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, the index is ignored. + /// + ///
+ /// + /// + /// + /// + public static Tensor scatter_nd(Tensor indices, Tensor updates, Tensor shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ScatterNd", name) { args = new object[] { indices, updates, shape }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return scatter_nd_eager_fallback(indices, updates, shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["indices"] = indices; + keywords["updates"] = updates; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("ScatterNd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ScatterNd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor scatter_nd_eager_fallback(Tensor indices, Tensor updates, Tensor shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { indices, updates, shape }; + object[] _attrs = new object[] { "T", updates.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ScatterNd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ScatterNd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Applies sparse addition to `input` using individual values or slices + /// + /// + /// + /// from `updates` according to indices `indices`. The updates are non-aliasing: + /// `input` is only modified in-place if no other operations will use it. + /// Otherwise, a copy of `input` is made. This operation has a gradient with + /// respect to both `input` and `updates`. + /// + /// `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + /// + /// `indices` must be integer tensor, containing indices into `input`. + /// It must be shape \([d_0, ..., d_{Q-2}, K]\) where `0 < K <= P`. + /// + /// The innermost dimension of `indices` (with length `K`) corresponds to + /// indices into elements (if `K = P`) or `(P-K)`-dimensional slices + /// (if `K < P`) along the `K`th dimension of `input`. + /// + /// `updates` is `Tensor` of rank `Q-1+P-K` with shape: + /// + /// $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ + /// + /// For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 + /// elements. In Python, that addition would look like this: + /// + /// input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) + /// indices = tf.constant([[4], [3], [1], [7]]) + /// updates = tf.constant([9, 10, 11, 12]) + /// output = tf.scatter_nd_non_aliasing_add(input, indices, updates) + /// with tf.Session() as sess: + /// print(sess.run(output)) + /// + /// The resulting value `output` would look like this: + /// + /// [1, 13, 3, 14, 14, 6, 7, 20] + /// + /// See `tf.scatter_nd` for more details about how to make updates to slices. + /// + /// + /// + /// + /// + /// + public static Tensor scatter_nd_non_aliasing_add(Tensor input, Tensor indices, Tensor updates, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ScatterNdNonAliasingAdd", name) { args = new object[] { input, indices, updates }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return scatter_nd_non_aliasing_add_eager_fallback(input, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("ScatterNdNonAliasingAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ScatterNdNonAliasingAdd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor scatter_nd_non_aliasing_add_eager_fallback(Tensor input, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, indices, updates }; + object[] _attrs = new object[] { "T", input.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ScatterNdNonAliasingAdd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ScatterNdNonAliasingAdd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the shape of a tensor. + /// + /// + /// + /// This operation returns a 1-D integer tensor representing the shape of `input`. + /// + /// For example: + /// + /// ``` + /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + /// shape(t) ==> [2, 2, 3] + /// ``` + /// + /// + /// + /// + /// + public static Tensor shape(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Shape", name) { args = new object[] { input }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return shape_eager_fallback(input, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("Shape", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("Shape", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor shape_eager_fallback(Tensor input, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "out_type", out_type }; + var _result = _execute.execute("Shape", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Shape", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns shape of tensors. + /// + /// + /// + /// This operation returns N 1-D integer tensors representing shape of `input[i]s`. 
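+ ///
+ /// For instance (illustrative only; the hosting generated-ops class and the Tensors
+ /// aggregation are assumptions, not part of this patch):
+ ///
+ /// ```csharp
+ /// var a = tf.constant(new[] { 1, 2, 3 });
+ /// var b = tf.constant(new[,] { { 1, 2 }, { 3, 4 } });
+ /// var shapes = shape_n(new Tensors(a, b));   // expected shapes[0] = [3], shapes[1] = [2, 2]
+ /// ```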
+ /// + /// + /// + /// + /// + public static Tensor[] shape_n(Tensors input, TF_DataType out_type = TF_DataType.TF_INT32, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShapeN", name) { args = new object[] { input }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return shape_n_eager_fallback(input, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("ShapeN", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("ShapeN", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] shape_n_eager_fallback(Tensors input, TF_DataType out_type, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.AddRange(input); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", input.Length, "T", input.dtype, "out_type", out_type }; + var _result = _execute.execute("ShapeN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ShapeN", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Returns the size of a tensor. + /// + /// + /// + /// This operation returns an integer representing the number of elements in + /// `input`. + /// + /// For example: + /// + /// ``` + /// # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + /// size(t) ==> 12 + /// ``` + /// + /// + /// + /// + /// + public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Size", name) { args = new object[] { input }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return size_eager_fallback(input, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("Size", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("Size", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor size_eager_fallback(Tensor input, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "out_type", out_type }; + var _result = _execute.execute("Size", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Size", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Return a slice from 'input'. 
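+ ///
+ /// A minimal, illustrative C# sketch (the hosting generated-ops class is assumed):
+ ///
+ /// ```csharp
+ /// var t = tf.constant(new[] { 10, 20, 30, 40, 50 });
+ /// var s = slice(t, begin: tf.constant(new[] { 1 }), size: tf.constant(new[] { 3 }));   // expected [20, 30, 40]
+ /// ```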
+ /// + /// + /// + /// The output tensor is a tensor with dimensions described by 'size' + /// whose values are extracted from 'input' starting at the offsets in + /// 'begin'. + /// + /// *Requirements*: + /// 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) + /// + /// + /// + /// + /// + /// + public static Tensor slice(Tensor input, Tensor begin, Tensor size, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Slice", name) { args = new object[] { input, begin, size }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return slice_eager_fallback(input, begin, size, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["begin"] = begin; + keywords["size"] = size; + var _op = tf.OpDefLib._apply_op_helper("Slice", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index") }; + _execute.record_gradient("Slice", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor slice_eager_fallback(Tensor input, Tensor begin, Tensor size, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, begin, size }; + object[] _attrs = new object[] { "T", input.dtype, "Index", begin.dtype }; + var _result = _execute.execute("Slice", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Slice", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a copy of the input tensor. + /// + /// + /// + public static Tensor snapshot(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Snapshot", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return snapshot_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("Snapshot", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Snapshot", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor snapshot_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("Snapshot", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Snapshot", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// SpaceToBatch for 4-D tensors of type T. + /// + /// + /// + /// This is a legacy version of the more general SpaceToBatchND. + /// + /// Zero-pads and then rearranges (permutes) blocks of spatial data into batch. + /// More specifically, this op outputs a copy of the input tensor where values from + /// the `height` and `width` dimensions are moved to the `batch` dimension. 
After + /// the zero-padding, both `height` and `width` of the input must be divisible by the + /// block size. + /// + /// The attr `block_size` must be greater than one. It indicates the block size. + /// + /// * Non-overlapping blocks of size `block_size x block size` in the height and + /// width dimensions are rearranged into the batch dimension at each location. + /// * The batch of the output tensor is `batch * block_size * block_size`. + /// * Both height_pad and width_pad must be divisible by block_size. + /// + /// The shape of the output will be: + /// + /// [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, + /// depth] + /// + /// Some examples: + /// + /// (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: + /// + /// ``` + /// x = [[[[1], [2]], [[3], [4]]]] + /// ``` + /// + /// The output tensor has shape `[4, 1, 1, 1]` and value: + /// + /// ``` + /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + /// ``` + /// + /// (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: + /// + /// ``` + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// ``` + /// + /// The output tensor has shape `[4, 1, 1, 3]` and value: + /// + /// ``` + /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + /// ``` + /// + /// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]], + /// [[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// The output tensor has shape `[4, 2, 2, 1]` and value: + /// + /// ``` + /// x = [[[[1], [3]], [[9], [11]]], + /// [[[2], [4]], [[10], [12]]], + /// [[[5], [7]], [[13], [15]]], + /// [[[6], [8]], [[14], [16]]]] + /// ``` + /// + /// (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]]], + /// [[[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// The output tensor has shape `[8, 1, 2, 1]` and value: + /// + /// ``` + /// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], + /// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] + /// ``` + /// + /// Among others, this operation is useful for reducing atrous convolution into + /// regular convolution. + /// + /// + /// + /// + /// + /// + public static Tensor space_to_batch(Tensor input, Tensor paddings, int block_size = 0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SpaceToBatch", name) { args = new object[] { input, paddings }, attrs = new Dictionary() { ["block_size"] = block_size } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return space_to_batch_eager_fallback(input, paddings, block_size: block_size, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["paddings"] = paddings; + keywords["block_size"] = block_size; + var _op = tf.OpDefLib._apply_op_helper("SpaceToBatch", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings"), "block_size", _op._get_attr_int("block_size") }; + _execute.record_gradient("SpaceToBatch", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor space_to_batch_eager_fallback(Tensor input, Tensor paddings, int block_size, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, paddings }; + object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype, "block_size", block_size }; + var _result = _execute.execute("SpaceToBatch", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SpaceToBatch", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// SpaceToBatch for N-D tensors of type T. + /// + /// + /// + /// This operation divides "spatial" dimensions `[1, ..., M]` of the input into a + /// grid of blocks of shape `block_shape`, and interleaves these blocks with the + /// "batch" dimension (0) such that in the output, the spatial dimensions + /// `[1, ..., M]` correspond to the position within the grid, and the batch + /// dimension combines both the position within a spatial block and the original + /// batch position. Prior to division into blocks, the spatial dimensions of the + /// input are optionally zero padded according to `paddings`. See below for a + /// precise description. + /// + /// This operation is equivalent to the following steps: + /// + /// 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the + /// input according to `paddings` to produce `padded` of shape `padded_shape`. + /// + /// 2. Reshape `padded` to `reshaped_padded` of shape: + /// + /// [batch] + + /// [padded_shape[1] / block_shape[0], + /// block_shape[0], + /// ..., + /// padded_shape[M] / block_shape[M-1], + /// block_shape[M-1]] + + /// remaining_shape + /// + /// 3. Permute dimensions of `reshaped_padded` to produce + /// `permuted_reshaped_padded` of shape: + /// + /// block_shape + + /// [batch] + + /// [padded_shape[1] / block_shape[0], + /// ..., + /// padded_shape[M] / block_shape[M-1]] + + /// remaining_shape + /// + /// 4. 
Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch + /// dimension, producing an output tensor of shape: + /// + /// [batch * prod(block_shape)] + + /// [padded_shape[1] / block_shape[0], + /// ..., + /// padded_shape[M] / block_shape[M-1]] + + /// remaining_shape + /// + /// Some examples: + /// + /// (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and + /// `paddings = [[0, 0], [0, 0]]`: + /// + /// ``` + /// x = [[[[1], [2]], [[3], [4]]]] + /// ``` + /// + /// The output tensor has shape `[4, 1, 1, 1]` and value: + /// + /// ``` + /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + /// ``` + /// + /// (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and + /// `paddings = [[0, 0], [0, 0]]`: + /// + /// ``` + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// ``` + /// + /// The output tensor has shape `[4, 1, 1, 3]` and value: + /// + /// ``` + /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + /// ``` + /// + /// (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and + /// `paddings = [[0, 0], [0, 0]]`: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]], + /// [[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// The output tensor has shape `[4, 2, 2, 1]` and value: + /// + /// ``` + /// x = [[[[1], [3]], [[9], [11]]], + /// [[[2], [4]], [[10], [12]]], + /// [[[5], [7]], [[13], [15]]], + /// [[[6], [8]], [[14], [16]]]] + /// ``` + /// + /// (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and + /// paddings = `[[0, 0], [2, 0]]`: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]]], + /// [[[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// The output tensor has shape `[8, 1, 3, 1]` and value: + /// + /// ``` + /// x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + /// [[[0], [2], [4]]], [[[0], [10], [12]]], + /// [[[0], [5], [7]]], [[[0], [13], [15]]], + /// [[[0], [6], [8]]], [[[0], [14], [16]]]] + /// ``` + /// + /// Among others, this operation is useful for reducing atrous convolution into + /// regular convolution. + /// + /// + /// + /// + /// + /// + public static Tensor space_to_batch_nd(Tensor input, Tensor block_shape, Tensor paddings, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SpaceToBatchND", name) { args = new object[] { input, block_shape, paddings }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return space_to_batch_nd_eager_fallback(input, block_shape, paddings, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["block_shape"] = block_shape; + keywords["paddings"] = paddings; + var _op = tf.OpDefLib._apply_op_helper("SpaceToBatchND", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tblock_shape", _op._get_attr_type("Tblock_shape"), "Tpaddings", _op._get_attr_type("Tpaddings") }; + _execute.record_gradient("SpaceToBatchND", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor space_to_batch_nd_eager_fallback(Tensor input, Tensor block_shape, Tensor paddings, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, block_shape, paddings }; + object[] _attrs = new object[] { "T", input.dtype, "Tblock_shape", block_shape.dtype, "Tpaddings", paddings.dtype }; + var _result = _execute.execute("SpaceToBatchND", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SpaceToBatchND", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// SpaceToDepth for tensors of type T. + /// + /// + /// + /// Rearranges blocks of spatial data, into depth. More specifically, + /// this op outputs a copy of the input tensor where values from the `height` + /// and `width` dimensions are moved to the `depth` dimension. + /// The attr `block_size` indicates the input block size. + /// + /// * Non-overlapping blocks of size `block_size x block size` are rearranged + /// into depth at each location. + /// * The depth of the output tensor is `block_size * block_size * input_depth`. + /// * The Y, X coordinates within each block of the input become the high order + /// component of the output channel index. + /// * The input tensor's height and width must be divisible by block_size. + /// + /// The `data_format` attr specifies the layout of the input and output tensors + /// with the following options: + /// "NHWC": `[ batch, height, width, channels ]` + /// "NCHW": `[ batch, channels, height, width ]` + /// "NCHW_VECT_C": + /// `qint8 [ batch, channels / 4, height, width, 4 ]` + /// + /// It is useful to consider the operation as transforming a 6-D Tensor. + /// e.g. for data_format = NHWC, + /// Each element in the input tensor can be specified via 6 coordinates, + /// ordered by decreasing memory layout significance as: + /// n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates + /// within the output image, bX, bY means coordinates + /// within the input block, iC means input channels). + /// The output would be a transpose to the following layout: + /// n,oY,oX,bY,bX,iC + /// + /// This operation is useful for resizing the activations between convolutions + /// (but keeping all data), e.g. instead of pooling. It is also useful for training + /// purely convolutional models. 
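+ // --- Illustrative usage sketch (editor's note, not part of the generated file) ---
+ // A minimal eager-mode call of the space_to_batch_nd wrapper above, following
+ // example (1) from its doc comment. Assumes the surrounding class context and the
+ // usual TensorFlow.NET entry point tf.constant for building the argument tensors.
+ // var stbInput   = tf.constant(new[, , ,] { { { { 1 }, { 2 } }, { { 3 }, { 4 } } } }); // shape [1, 2, 2, 1]
+ // var blockShape = tf.constant(new[] { 2, 2 });
+ // var paddings   = tf.constant(new[,] { { 0, 0 }, { 0, 0 } });
+ // var batched    = space_to_batch_nd(stbInput, blockShape, paddings);                 // shape [4, 1, 1, 1]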
+ /// + /// For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and + /// block_size = 2: + /// + /// ``` + /// x = [[[[1], [2]], + /// [[3], [4]]]] + /// ``` + /// + /// This operation will output a tensor of shape `[1, 1, 1, 4]`: + /// + /// ``` + /// [[[[1, 2, 3, 4]]]] + /// ``` + /// + /// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, + /// the corresponding output will have a single element (i.e. width and height are + /// both 1) and will have a depth of 4 channels (1 * block_size * block_size). + /// The output element shape is `[1, 1, 4]`. + /// + /// For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. + /// + /// ``` + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// ``` + /// + /// This operation, for block_size of 2, will return the following tensor of shape + /// `[1, 1, 1, 12]` + /// + /// ``` + /// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + /// ``` + /// + /// Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: + /// + /// ``` + /// x = [[[[1], [2], [5], [6]], + /// [[3], [4], [7], [8]], + /// [[9], [10], [13], [14]], + /// [[11], [12], [15], [16]]]] + /// ``` + /// + /// the operator will return the following tensor of shape `[1 2 2 4]`: + /// + /// ``` + /// x = [[[[1, 2, 3, 4], + /// [5, 6, 7, 8]], + /// [[9, 10, 11, 12], + /// [13, 14, 15, 16]]]] + /// ``` + /// + /// + /// + /// + /// + /// The size of the spatial block. + /// + /// + /// + /// + public static Tensor space_to_depth(Tensor input, int block_size = 0, string data_format = "NHWC", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SpaceToDepth", name) { args = new object[] { input }, attrs = new Dictionary() { ["block_size"] = block_size, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return space_to_depth_eager_fallback(input, block_size: block_size, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["block_size"] = block_size; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("SpaceToDepth", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "block_size", _op._get_attr_int("block_size"), "data_format", _op.get_attr("data_format") }; + _execute.record_gradient("SpaceToDepth", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor space_to_depth_eager_fallback(Tensor input, int block_size, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "block_size", block_size, "data_format", data_format }; + var _result = _execute.execute("SpaceToDepth", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SpaceToDepth", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Splits a tensor into `num_split` tensors along one dimension. + /// + /// + /// + /// + /// + /// The number of ways to split. Must evenly divide + /// `value.shape[split_dim]`. 
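+ // --- Illustrative usage sketch (editor's note, not part of the generated file) ---
+ // A minimal call of the space_to_depth wrapper above, following the [1, 2, 2, 1]
+ // example from its doc comment. Assumes NHWC layout and that tf.constant accepts a
+ // 4-D array, as in TensorFlow.NET.
+ // var sdInput = tf.constant(new[, , ,] { { { { 1 }, { 2 } }, { { 3 }, { 4 } } } }); // shape [1, 2, 2, 1]
+ // var packed  = space_to_depth(sdInput, block_size: 2);                             // shape [1, 1, 1, 4]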
+ /// + /// + /// + public static Tensor[] split(Tensor split_dim, Tensor value, int num_split = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Split", name) { args = new object[] { split_dim, value }, attrs = new Dictionary() { ["num_split"] = num_split } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return split_eager_fallback(split_dim, value, num_split: num_split, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["split_dim"] = split_dim; + keywords["value"] = value; + keywords["num_split"] = num_split; + var _op = tf.OpDefLib._apply_op_helper("Split", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num_split", _op._get_attr_int("num_split"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("Split", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] split_eager_fallback(Tensor split_dim, Tensor value, int num_split, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { split_dim, value }; + object[] _attrs = new object[] { "num_split", num_split, "T", value.dtype }; + var _result = _execute.execute("Split", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Split", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Splits a tensor into `num_split` tensors along one dimension. + /// + /// + /// + /// + /// + /// + public static Tensor[] split_v(Tensor value, Tensor size_splits, Tensor split_dim, int num_split = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SplitV", name) { args = new object[] { value, size_splits, split_dim }, attrs = new Dictionary() { ["num_split"] = num_split } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return split_v_eager_fallback(value, size_splits, split_dim, num_split: num_split, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["size_splits"] = size_splits; + keywords["split_dim"] = split_dim; + keywords["num_split"] = num_split; + var _op = tf.OpDefLib._apply_op_helper("SplitV", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num_split", _op._get_attr_int("num_split"), "T", _op._get_attr_type("T"), "Tlen", _op._get_attr_type("Tlen") }; + _execute.record_gradient("SplitV", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] split_v_eager_fallback(Tensor value, Tensor size_splits, Tensor split_dim, int num_split, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value, size_splits, split_dim }; + object[] _attrs = new object[] { "num_split", num_split, "T", value.dtype, "Tlen", size_splits.dtype }; + var _result = _execute.execute("SplitV", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SplitV", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Removes dimensions of size 1 from the shape of a tensor. 
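+ // --- Illustrative usage sketch (editor's note, not part of the generated file) ---
+ // Minimal calls of the split / split_v wrappers above: split produces num_split
+ // equally sized pieces along split_dim, while split_v takes explicit (possibly
+ // unequal) sizes via size_splits. Assumes the TensorFlow.NET tf.constant helper.
+ // var values   = tf.constant(new[] { 1, 2, 3, 4, 5, 6 });
+ // var splitDim = tf.constant(0);
+ // Tensor[] halves = split(splitDim, values, num_split: 2);          // two tensors of length 3
+ // var sizes    = tf.constant(new[] { 4, 2 });
+ // Tensor[] pieces = split_v(values, sizes, splitDim, num_split: 2); // lengths 4 and 2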
+ /// + /// + /// + /// Given a tensor `input`, this operation returns a tensor of the same type with + /// all dimensions of size 1 removed. If you don't want to remove all size 1 + /// dimensions, you can remove specific size 1 dimensions by specifying + /// `squeeze_dims`. + /// + /// For example: + /// + /// ``` + /// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + /// shape(squeeze(t)) ==> [2, 3] + /// ``` + /// + /// Or, to remove specific size 1 dimensions: + /// + /// ``` + /// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + /// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] + /// ``` + /// + /// + /// + /// + /// + /// If specified, only squeezes the dimensions listed. The dimension + /// index starts at 0. It is an error to squeeze a dimension that is not 1. Must + /// be in the range `[-rank(input), rank(input))`. + /// + /// + /// + public static Tensor squeeze(Tensor input, int[] squeeze_dims = null, string? name = null) + { + var _ctx = tf.Context; + if (squeeze_dims is null) + { + squeeze_dims = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Squeeze", name) { args = new object[] { input }, attrs = new Dictionary() { ["squeeze_dims"] = squeeze_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return squeeze_eager_fallback(input, squeeze_dims: squeeze_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["squeeze_dims"] = squeeze_dims; + var _op = tf.OpDefLib._apply_op_helper("Squeeze", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "squeeze_dims", _op.get_attr("squeeze_dims") }; + _execute.record_gradient("Squeeze", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor squeeze_eager_fallback(Tensor input, int[] squeeze_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "squeeze_dims", squeeze_dims }; + var _result = _execute.execute("Squeeze", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Squeeze", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Stops gradient computation. + /// + /// + /// + /// When executed in a graph, this op outputs its input tensor as-is. + /// + /// When building ops to compute gradients, this op prevents the contribution of + /// its inputs to be taken into account. Normally, the gradient generator adds ops + /// to a graph to compute the derivatives of a specified 'loss' by recursively + /// finding out inputs that contributed to its computation. If you insert this op + /// in the graph it inputs are masked from the gradient generator. They are not + /// taken into account for computing gradients. + /// + /// This is useful any time you want to compute a value with TensorFlow but need + /// to pretend that the value was a constant. For example, the softmax function + /// for a vector x can be written as + /// + /// ```python + /// + /// def softmax(x): + /// numerator = tf.exp(x) + /// denominator = tf.reduce_sum(numerator) + /// return numerator / denominator + /// ``` + /// + /// This however is susceptible to overflow if the values in x are large. 
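+ // --- Illustrative usage sketch (editor's note, not part of the generated file) ---
+ // Minimal calls of the squeeze wrapper above, mirroring the shapes in its doc
+ // comment. Assumes tf.ones(Shape) is available as in TensorFlow.NET.
+ // var padded   = tf.ones(new Shape(1, 2, 1, 3, 1, 1));
+ // var all      = squeeze(padded);                               // shape [2, 3]
+ // var selected = squeeze(padded, squeeze_dims: new[] { 2, 4 }); // shape [1, 2, 3, 1]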
An + /// alternative more stable way is to subtract the maximum of x from each of the + /// values. + /// + /// ```python + /// + /// def stable_softmax(x): + /// z = x - tf.reduce_max(x) + /// numerator = tf.exp(z) + /// denominator = tf.reduce_sum(numerator) + /// return numerator / denominator + /// ``` + /// + /// However, when we backprop through the softmax to x, we dont want to backprop + /// through the `tf.reduce_max(x)` (if the max values are not unique then the + /// gradient could flow to the wrong input) calculation and treat that as a + /// constant. Therefore, we should write this out as + /// + /// ```python + /// + /// def stable_softmax(x): + /// z = x - tf.stop_gradient(tf.reduce_max(x)) + /// numerator = tf.exp(z) + /// denominator = tf.reduce_sum(numerator) + /// return numerator / denominator + /// ``` + /// + /// Some other examples include: + /// + /// * The *EM* algorithm where the *M-step* should not involve backpropagation + /// through the output of the *E-step*. + /// * Contrastive divergence training of Boltzmann machines where, when + /// differentiating the energy function, the training must not backpropagate + /// through the graph that generated the samples from the model. + /// * Adversarial training, where no backprop should happen through the adversarial + /// example generation process. + /// + /// + /// + /// + public static Tensor stop_gradient(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StopGradient", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return stop_gradient_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("StopGradient", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("StopGradient", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor stop_gradient_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("StopGradient", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("StopGradient", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Return a strided slice from `input`. + /// + /// + /// + /// Note, most python users will want to use the Python `Tensor.__getitem__` + /// or `Variable.__getitem__` rather than this op directly. + /// + /// The goal of this op is to produce a new tensor with a subset of + /// the elements from the `n` dimensional `input` tensor. The subset is chosen using + /// a sequence of `m` sparse range specifications encoded into the arguments + /// of this function. Note, in some cases + /// `m` could be equal to `n`, but this need not be the case. Each + /// range specification entry can be one of the following: + /// + /// - An ellipsis (...). Ellipses are used to imply zero or more + /// dimensions of full-dimension selection and are produced using + /// `ellipsis_mask`. For example, `foo[...]` is the identity slice. 
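+ // --- Illustrative usage sketch (editor's note, not part of the generated file) ---
+ // A C# translation of the stable-softmax pattern from the stop_gradient doc comment
+ // above. Assumes tf.reduce_max, tf.exp and tf.reduce_sum are available as in
+ // TensorFlow.NET, together with the Tensor operator overloads.
+ // Tensor StableSoftmax(Tensor x)
+ // {
+ //     // Treat the max as a constant so no gradient flows through the reduction.
+ //     var z = x - stop_gradient(tf.reduce_max(x));
+ //     var numerator = tf.exp(z);
+ //     var denominator = tf.reduce_sum(numerator);
+ //     return numerator / denominator;
+ // }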
+ /// + /// - A new axis. This is used to insert a new shape=1 dimension and is + /// produced using `new_axis_mask`. For example, `foo[:, ...]` where + /// `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. + /// + /// + /// - A range `begin:end:stride`. This is used to specify how much to choose from + /// a given dimension. `stride` can be any integer but 0. `begin` is an integer + /// which represents the index of the first value to select while `end` represents + /// the index of the last value to select. The number of values selected in each + /// dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. + /// `begin` and `end` can be negative where `-1` is the last element, `-2` is + /// the second to last. `begin_mask` controls whether to replace the explicitly + /// given `begin` with an implicit effective value of `0` if `stride > 0` and + /// `-1` if `stride < 0`. `end_mask` is analogous but produces the number + /// required to create the largest open interval. For example, given a shape + /// `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do + /// not assume this is equivalent to `foo[0:-1]` which has an effective `begin` + /// and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the + /// first dimension of a tensor while dropping the last two (in the original + /// order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. + /// + /// - A single index. This is used to keep only elements that have a given + /// index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a + /// shape `(6,)` tensor. This is encoded in `begin` and `end` and + /// `shrink_axis_mask`. + /// + /// Each conceptual range specification is encoded in the op's argument. This + /// encoding is best understand by considering a non-trivial example. In + /// particular, + /// `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as + /// + /// ``` + /// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) + /// end = [2, 4, x, x, -3, x] + /// strides = [1, 1, x, x, -1, 1] + /// begin_mask = 1<<4 | 1<<5 = 48 + /// end_mask = 1<<5 = 32 + /// ellipsis_mask = 1<<3 = 8 + /// new_axis_mask = 1<<2 = 4 + /// shrink_axis_mask = 1<<0 = 1 + /// ``` + /// + /// In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of + /// the slice becomes (2, 1, 5, 5, 2, 5). + /// Let us walk step by step through each argument specification. + /// + /// 1. The first argument in the example slice is turned into `begin = 1` and + /// `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we + /// also set the appropriate bit in `shrink_axis_mask`. + /// + /// 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have + /// zero bits contributed. + /// + /// 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 + /// dimension in the final shape. Dummy values are contributed to begin, + /// end and stride, while the new_axis_mask bit is set. + /// + /// 4. `...` grab the full ranges from as many dimensions as needed to + /// fully specify a slice for every dimension of the input shape. + /// + /// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated + /// with a dimension that has shape `s` is converted to a positive index + /// `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion + /// is done internally so begin, end and strides receive x, -3, and -1. 
+ /// The appropriate begin_mask bit is set to indicate the start range is the + /// full range (ignoring the x). + /// + /// 6. `:` indicates that the entire contents of the corresponding dimension + /// is selected. This is equivalent to `::` or `0::1`. begin, end, and strides + /// receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and + /// `end_mask` are also set. + /// + /// *Requirements*: + /// `0 != strides[i] for i in [0, m)` + /// `ellipsis_mask must be a power of two (only one ellipsis)` + /// + /// + /// + /// + /// + /// + /// + /// + /// a bitmask where a bit i being 1 means to ignore the begin + /// value and instead use the largest interval possible. At runtime + /// begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or + /// `[-1, n-1]` if `stride[i] < 0` + /// + /// + /// + /// + /// analogous to `begin_mask` + /// + /// + /// + /// + /// a bitmask where bit `i` being 1 means the `i`th + /// position is actually an ellipsis. One bit at most can be 1. + /// If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)` + /// is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis + /// implicitly creates as many range specifications as necessary to fully + /// specify the sliced range for every dimension. For example for a 4-dimensional + /// tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`. + /// + /// + /// + /// + /// a bitmask where bit `i` being 1 means the `i`th + /// specification creates a new shape 1 dimension. For example + /// `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor. + /// + /// + /// + /// + /// a bitmask where bit `i` implies that the `i`th + /// specification should shrink the dimensionality. begin and end + /// must imply a slice of size 1 in the dimension. For example in + /// python one might do `foo[:, 3, :]` which would result in + /// `shrink_axis_mask` being 2. + /// + /// + /// + public static Tensor strided_slice(Tensor input, Tensor begin, Tensor end, Tensor strides, int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StridedSlice", name) { args = new object[] { input, begin, end, strides }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return strided_slice_eager_fallback(input, begin, end, strides, begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["begin"] = begin; + keywords["end"] = end; + keywords["strides"] = strides; + keywords["begin_mask"] = begin_mask; + keywords["end_mask"] = end_mask; + keywords["ellipsis_mask"] = ellipsis_mask; + keywords["new_axis_mask"] = new_axis_mask; + keywords["shrink_axis_mask"] = shrink_axis_mask; + var _op = tf.OpDefLib._apply_op_helper("StridedSlice", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index"), "begin_mask", _op._get_attr_int("begin_mask"), "end_mask", _op._get_attr_int("end_mask"), "ellipsis_mask", _op._get_attr_int("ellipsis_mask"), "new_axis_mask", _op._get_attr_int("new_axis_mask"), "shrink_axis_mask", _op._get_attr_int("shrink_axis_mask") }; + _execute.record_gradient("StridedSlice", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor strided_slice_eager_fallback(Tensor input, Tensor begin, Tensor end, Tensor strides, int begin_mask, int end_mask, int ellipsis_mask, int new_axis_mask, int shrink_axis_mask, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, begin, end, strides }; + object[] _attrs = new object[] { "T", input.dtype, "Index", begin.dtype, "begin_mask", begin_mask, "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask }; + var _result = _execute.execute("StridedSlice", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("StridedSlice", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Assign `value` to the sliced l-value reference of `ref`. + /// + /// + /// + /// The values of `value` are assigned to the positions in the variable + /// `ref` that are selected by the slice parameters. The slice parameters + /// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. + /// + /// NOTE this op currently does not support broadcasting and so `value`'s + /// shape must be exactly the shape produced by the slice of `ref`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor strided_slice_assign(Tensor ref_, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("strided_slice_assign op does not support eager execution. 
Arg ref is a ref."); + } + Dictionary keywords = new(); + keywords["ref"] = ref_; + keywords["begin"] = begin; + keywords["end"] = end; + keywords["strides"] = strides; + keywords["value"] = value; + keywords["begin_mask"] = begin_mask; + keywords["end_mask"] = end_mask; + keywords["ellipsis_mask"] = ellipsis_mask; + keywords["new_axis_mask"] = new_axis_mask; + keywords["shrink_axis_mask"] = shrink_axis_mask; + var _op = tf.OpDefLib._apply_op_helper("StridedSliceAssign", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index"), "begin_mask", _op._get_attr_int("begin_mask"), "end_mask", _op._get_attr_int("end_mask"), "ellipsis_mask", _op._get_attr_int("ellipsis_mask"), "new_axis_mask", _op._get_attr_int("new_axis_mask"), "shrink_axis_mask", _op._get_attr_int("shrink_axis_mask") }; + _execute.record_gradient("StridedSliceAssign", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor strided_slice_assign_eager_fallback(Tensor ref_, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask, int end_mask, int ellipsis_mask, int new_axis_mask, int shrink_axis_mask, string name, Context ctx) + { + throw new RuntimeError($"strided_slice_assign op does not support eager execution. Arg 'ref' is a ref."); + } + /// + /// Returns the gradient of `StridedSlice`. + /// + /// + /// + /// Since `StridedSlice` cuts out pieces of its `input` which is size + /// `shape`, its gradient will have the same shape (which is passed here + /// as `shape`). The gradient will be zero in any element that the slice + /// does not select. + /// + /// Arguments are the same as StridedSliceGrad with the exception that + /// `dy` is the input gradient to be propagated and `shape` is the + /// shape of `StridedSlice`'s `input`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor strided_slice_grad(Tensor shape, Tensor begin, Tensor end, Tensor strides, Tensor dy, int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StridedSliceGrad", name) { args = new object[] { shape, begin, end, strides, dy }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return strided_slice_grad_eager_fallback(shape, begin, end, strides, dy, begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["shape"] = shape; + keywords["begin"] = begin; + keywords["end"] = end; + keywords["strides"] = strides; + keywords["dy"] = dy; + keywords["begin_mask"] = begin_mask; + keywords["end_mask"] = end_mask; + keywords["ellipsis_mask"] = ellipsis_mask; + keywords["new_axis_mask"] = new_axis_mask; + keywords["shrink_axis_mask"] = shrink_axis_mask; + var _op = tf.OpDefLib._apply_op_helper("StridedSliceGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index"), "begin_mask", _op._get_attr_int("begin_mask"), "end_mask", _op._get_attr_int("end_mask"), "ellipsis_mask", _op._get_attr_int("ellipsis_mask"), "new_axis_mask", _op._get_attr_int("new_axis_mask"), "shrink_axis_mask", _op._get_attr_int("shrink_axis_mask") }; + _execute.record_gradient("StridedSliceGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor strided_slice_grad_eager_fallback(Tensor shape, Tensor begin, Tensor end, Tensor strides, Tensor dy, int begin_mask, int end_mask, int ellipsis_mask, int new_axis_mask, int shrink_axis_mask, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { shape, begin, end, strides, dy }; + object[] _attrs = new object[] { "T", dy.dtype, "Index", shape.dtype, "begin_mask", begin_mask, "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask }; + var _result = _execute.execute("StridedSliceGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("StridedSliceGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Adds sparse `updates` to an existing tensor according to `indices`. + /// + /// + /// + /// This operation creates a new tensor by adding sparse `updates` to the passed + /// in `tensor`. + /// This operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the + /// updates are added onto an existing tensor (as opposed to a variable). If the + /// memory for the existing tensor cannot be re-used, a copy is made and updated. + /// + /// `indices` is an integer tensor containing indices into a new tensor of shape + /// `tensor.shape`. 
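+ // --- Illustrative usage sketch (editor's note, not part of the generated file) ---
+ // A minimal call of the strided_slice wrapper above: with all masks left at 0 this
+ // selects rows 1..2 and every column, i.e. the Python slice t[1:3, 0:3]. Assumes the
+ // TensorFlow.NET tf.constant helper.
+ // var grid    = tf.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } });
+ // var begin   = tf.constant(new[] { 1, 0 });
+ // var end     = tf.constant(new[] { 3, 3 });
+ // var strides = tf.constant(new[] { 1, 1 });
+ // var rows    = strided_slice(grid, begin, end, strides); // [[4, 5, 6], [7, 8, 9]]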
The last dimension of `indices` can be at most the rank of + /// `tensor.shape`: + /// + /// ``` + /// indices.shape[-1] <= tensor.shape.rank + /// ``` + /// + /// The last dimension of `indices` corresponds to indices into elements + /// (if `indices.shape[-1] = tensor.shape.rank`) or slices + /// (if `indices.shape[-1] < tensor.shape.rank`) along dimension + /// `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape + /// + /// ``` + /// indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + /// ``` + /// + /// The simplest form of `tensor_scatter_nd_add` is to add individual elements to a + /// tensor by index. For example, say we want to add 4 elements in a rank-1 + /// tensor with 8 elements. + /// + /// In Python, this scatter add operation would look like this: + /// + /// >>> indices = tf.constant([[4], [3], [1], [7]]) + /// >>> updates = tf.constant([9, 10, 11, 12]) + /// >>> tensor = tf.ones([8], dtype=tf.int32) + /// >>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates) + /// >>> updated + /// + /// + /// We can also, insert entire slices of a higher rank tensor all at once. For + /// example, if we wanted to insert two slices in the first dimension of a + /// rank-3 tensor with two matrices of new values. + /// + /// In Python, this scatter add operation would look like this: + /// + /// >>> indices = tf.constant([[0], [2]]) + /// >>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + /// ... [7, 7, 7, 7], [8, 8, 8, 8]], + /// ... [[5, 5, 5, 5], [6, 6, 6, 6], + /// ... [7, 7, 7, 7], [8, 8, 8, 8]]]) + /// >>> tensor = tf.ones([4, 4, 4],dtype=tf.int32) + /// >>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates) + /// >>> updated + /// + /// + /// Note: on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, the index is ignored. + /// + /// + /// + /// + /// + /// + public static Tensor tensor_scatter_add(Tensor tensor, Tensor indices, Tensor updates, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterAdd", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_scatter_add_eager_fallback(tensor, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("TensorScatterAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("TensorScatterAdd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_scatter_add_eager_fallback(Tensor tensor, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, indices, updates }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("TensorScatterAdd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorScatterAdd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Apply a sparse update to a tensor taking the element-wise maximum. + /// + /// + /// + /// Returns a new tensor copied from `tensor` whose values are element-wise maximum between + /// tensor and updates according to the indices. + /// + /// >>> tensor = [0, 0, 0, 0, 0, 0, 0, 0] + /// >>> indices = [[1], [4], [5]] + /// >>> updates = [1, -1, 1] + /// >>> tf.tensor_scatter_nd_max(tensor, indices, updates).numpy() + /// array([0, 1, 0, 0, 0, 1, 0, 0], dtype=int32) + /// + /// Refer to `tf.tensor_scatter_nd_update` for more details. + /// + /// + /// + /// + /// + /// + public static Tensor tensor_scatter_max(Tensor tensor, Tensor indices, Tensor updates, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterMax", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_scatter_max_eager_fallback(tensor, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("TensorScatterMax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("TensorScatterMax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_scatter_max_eager_fallback(Tensor tensor, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, indices, updates }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("TensorScatterMax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorScatterMax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor tensor_scatter_min(Tensor tensor, Tensor indices, Tensor updates, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterMin", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_scatter_min_eager_fallback(tensor, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("TensorScatterMin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("TensorScatterMin", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_scatter_min_eager_fallback(Tensor tensor, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, indices, updates }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("TensorScatterMin", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorScatterMin", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Subtracts sparse `updates` from an existing tensor according to `indices`. + /// + /// + /// + /// This operation creates a new tensor by subtracting sparse `updates` from the + /// passed in `tensor`. 
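+ // --- Illustrative usage sketch (editor's note, not part of the generated file) ---
+ // A minimal call of the tensor_scatter_add wrapper above, mirroring the rank-1
+ // example in its doc comment. Assumes tf.ones(Shape, dtype) and tf.int32 are
+ // available as in TensorFlow.NET.
+ // var baseTensor = tf.ones(new Shape(8), dtype: tf.int32);
+ // var indices    = tf.constant(new[,] { { 4 }, { 3 }, { 1 }, { 7 } });
+ // var updates    = tf.constant(new[] { 9, 10, 11, 12 });
+ // var bumped     = tensor_scatter_add(baseTensor, indices, updates); // [1, 12, 1, 11, 10, 1, 1, 13]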
+ /// This operation is very similar to `tf.scatter_nd_sub`, except that the updates + /// are subtracted from an existing tensor (as opposed to a variable). If the memory + /// for the existing tensor cannot be re-used, a copy is made and updated. + /// + /// `indices` is an integer tensor containing indices into a new tensor of shape + /// `shape`. The last dimension of `indices` can be at most the rank of `shape`: + /// + /// indices.shape[-1] <= shape.rank + /// + /// The last dimension of `indices` corresponds to indices into elements + /// (if `indices.shape[-1] = shape.rank`) or slices + /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + /// `shape`. `updates` is a tensor with shape + /// + /// indices.shape[:-1] + shape[indices.shape[-1]:] + /// + /// The simplest form of tensor_scatter_sub is to subtract individual elements + /// from a tensor by index. For example, say we want to insert 4 scattered elements + /// in a rank-1 tensor with 8 elements. + /// + /// In Python, this scatter subtract operation would look like this: + /// + /// ```python + /// indices = tf.constant([[4], [3], [1], [7]]) + /// updates = tf.constant([9, 10, 11, 12]) + /// tensor = tf.ones([8], dtype=tf.int32) + /// updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) + /// print(updated) + /// ``` + /// + /// The resulting tensor would look like this: + /// + /// [1, -10, 1, -9, -8, 1, 1, -11] + /// + /// We can also, insert entire slices of a higher rank tensor all at once. For + /// example, if we wanted to insert two slices in the first dimension of a + /// rank-3 tensor with two matrices of new values. + /// + /// In Python, this scatter add operation would look like this: + /// + /// ```python + /// indices = tf.constant([[0], [2]]) + /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + /// [7, 7, 7, 7], [8, 8, 8, 8]], + /// [[5, 5, 5, 5], [6, 6, 6, 6], + /// [7, 7, 7, 7], [8, 8, 8, 8]]]) + /// tensor = tf.ones([4, 4, 4],dtype=tf.int32) + /// updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) + /// print(updated) + /// ``` + /// + /// The resulting tensor would look like this: + /// + /// [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], + /// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + /// [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], + /// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + /// + /// Note that on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, the index is ignored. + /// + /// + /// + /// + /// + /// + public static Tensor tensor_scatter_sub(Tensor tensor, Tensor indices, Tensor updates, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterSub", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_scatter_sub_eager_fallback(tensor, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("TensorScatterSub", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("TensorScatterSub", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_scatter_sub_eager_fallback(Tensor tensor, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, indices, updates }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("TensorScatterSub", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorScatterSub", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Scatter `updates` into an existing tensor according to `indices`. + /// + /// + /// + /// This operation creates a new tensor by applying sparse `updates` to the passed + /// in `tensor`. + /// This operation is very similar to `tf.scatter_nd`, except that the updates are + /// scattered onto an existing tensor (as opposed to a zero-tensor). If the memory + /// for the existing tensor cannot be re-used, a copy is made and updated. + /// + /// If `indices` contains duplicates, then we pick the last update for the index. + /// + /// If an out of bound index is found on CPU, an error is returned. + /// + /// **WARNING**: There are some GPU specific semantics for this operation. + /// - If an out of bound index is found, the index is ignored. + /// - The order in which updates are applied is nondeterministic, so the output + /// will be nondeterministic if `indices` contains duplicates. + /// + /// `indices` is an integer tensor containing indices into a new tensor of shape + /// `shape`. + /// + /// * `indices` must have at least 2 axes: `(num_updates, index_depth)`. + /// * The last axis of `indices` is how deep to index into `tensor` so this index + /// depth must be less than the rank of `tensor`: `indices.shape[-1] <= tensor.ndim` + /// + /// if `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements. + /// if `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input + /// `tensor`. + /// + /// Each `update` has a rank of `tensor.rank - indices.shape[-1]`. + /// The overall shape of `updates` is: + /// + /// ``` + /// indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + /// ``` + /// + /// For usage examples see the python [tf.tensor_scatter_nd_update]( + /// https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function + /// + /// + /// + /// + /// + /// + /// + public static Tensor tensor_scatter_update(Tensor tensor, Tensor indices, Tensor updates, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterUpdate", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_scatter_update_eager_fallback(tensor, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("TensorScatterUpdate", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("TensorScatterUpdate", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_scatter_update_eager_fallback(Tensor tensor, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, indices, updates }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("TensorScatterUpdate", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorScatterUpdate", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Assign `value` to the sliced l-value reference of `input`. + /// + /// + /// + /// The values of `value` are assigned to the positions in the tensor `input` that + /// are selected by the slice parameters. The slice parameters `begin` `end` + /// `strides` etc. work exactly as in `StridedSlice`. + /// + /// NOTE this op currently does not support broadcasting and so `value`'s shape + /// must be exactly the shape produced by the slice of `input`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor tensor_strided_slice_update(Tensor input, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorStridedSliceUpdate", name) { args = new object[] { input, begin, end, strides, value }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_strided_slice_update_eager_fallback(input, begin, end, strides, value, begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["begin"] = begin; + keywords["end"] = end; + keywords["strides"] = strides; + keywords["value"] = value; + keywords["begin_mask"] = begin_mask; + keywords["end_mask"] = end_mask; + keywords["ellipsis_mask"] = ellipsis_mask; + keywords["new_axis_mask"] = new_axis_mask; + keywords["shrink_axis_mask"] = shrink_axis_mask; + var _op = tf.OpDefLib._apply_op_helper("TensorStridedSliceUpdate", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index"), "begin_mask", _op._get_attr_int("begin_mask"), "end_mask", _op._get_attr_int("end_mask"), "ellipsis_mask", _op._get_attr_int("ellipsis_mask"), "new_axis_mask", _op._get_attr_int("new_axis_mask"), "shrink_axis_mask", _op._get_attr_int("shrink_axis_mask") }; + _execute.record_gradient("TensorStridedSliceUpdate", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_strided_slice_update_eager_fallback(Tensor input, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask, int end_mask, int ellipsis_mask, int new_axis_mask, int shrink_axis_mask, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, begin, end, strides, value }; + object[] _attrs = new object[] { "T", input.dtype, "Index", begin.dtype, "begin_mask", begin_mask, "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask }; + var _result = _execute.execute("TensorStridedSliceUpdate", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorStridedSliceUpdate", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Constructs a tensor by tiling a given tensor. + /// + /// + /// + /// This operation creates a new tensor by replicating `input` `multiples` times. + /// The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, + /// and the values of `input` are replicated `multiples[i]` times along the 'i'th + /// dimension. For example, tiling `[a b c d]` by `[2]` produces + /// `[a b c d a b c d]`. + /// + /// >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32) + /// >>> b = tf.constant([1,2], tf.int32) + /// >>> tf.tile(a, b) + /// + /// >>> c = tf.constant([2,1], tf.int32) + /// >>> tf.tile(a, c) + /// + /// >>> d = tf.constant([2,2], tf.int32) + /// >>> tf.tile(a, d) + /// + /// + /// + /// + /// + /// + public static Tensor tile(Tensor input, Tensor multiples, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Tile", name) { args = new object[] { input, multiples }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tile_eager_fallback(input, multiples, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["multiples"] = multiples; + var _op = tf.OpDefLib._apply_op_helper("Tile", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tmultiples", _op._get_attr_type("Tmultiples") }; + _execute.record_gradient("Tile", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tile_eager_fallback(Tensor input, Tensor multiples, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, multiples }; + object[] _attrs = new object[] { "T", input.dtype, "Tmultiples", multiples.dtype }; + var _result = _execute.execute("Tile", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Tile", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the gradient of `Tile`. + /// + /// + /// + /// Since `Tile` takes an input and repeats the input `multiples` times + /// along each dimension, `TileGrad` takes in `multiples` and aggregates + /// each repeated tile of `input` into `output`. + /// + /// + /// + /// + /// + public static Tensor tile_grad(Tensor input, Tensor multiples, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TileGrad", name) { args = new object[] { input, multiples }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tile_grad_eager_fallback(input, multiples, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["multiples"] = multiples; + var _op = tf.OpDefLib._apply_op_helper("TileGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("TileGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tile_grad_eager_fallback(Tensor input, Tensor multiples, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, multiples }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("TileGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TileGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Shuffle dimensions of x according to a permutation. + /// + /// + /// + /// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: + /// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` + /// + /// + /// + /// + /// + public static Tensor transpose(Tensor x, Tensor perm, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Transpose", name) { args = new object[] { x, perm }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return transpose_eager_fallback(x, perm, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["perm"] = perm; + var _op = tf.OpDefLib._apply_op_helper("Transpose", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tperm", _op._get_attr_type("Tperm") }; + _execute.record_gradient("Transpose", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor transpose_eager_fallback(Tensor x, Tensor perm, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, perm }; + object[] _attrs = new object[] { "T", x.dtype, "Tperm", perm.dtype }; + var _result = _execute.execute("Transpose", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Transpose", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Finds unique elements in a 1-D tensor. + /// + /// + /// + /// This operation returns a tensor `y` containing all of the unique elements of `x` + /// sorted in the same order that they occur in `x`; `x` does not need to be sorted. + /// This operation also returns a tensor `idx` the same size as `x` that contains + /// the index of each value of `x` in the unique output `y`. In other words: + /// + /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + /// + /// Examples: + /// + /// ``` + /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + /// y, idx = unique(x) + /// y ==> [1, 2, 4, 7, 8] + /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + /// ``` + /// + /// ``` + /// # tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5] + /// y, idx = unique(x) + /// y ==> [4, 5, 1, 2, 3] + /// idx ==> [0, 1, 2, 3, 4, 4, 0, 1] + /// ``` + /// + /// + /// + /// + /// + public static Tensor[] unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string? 
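The `Unique` wrapper returns both outputs (`y` and `idx`) as a tensor array. A sketch of the docstring's 1-D example, under the same naming assumptions:

```
var x = tf.constant(new[] { 1, 1, 2, 4, 4, 4, 7, 8, 8 });
var outputs = gen_array_ops.unique(x);   // outputs[0] = y, outputs[1] = idx
var y = outputs[0];                      // [1, 2, 4, 7, 8]
var idx = outputs[1];                    // [0, 0, 1, 2, 2, 2, 3, 4, 4]
```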
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Unique", name) { args = new object[] { x }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return unique_eager_fallback(x, out_idx: out_idx, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["out_idx"] = out_idx; + var _op = tf.OpDefLib._apply_op_helper("Unique", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_idx", _op._get_attr_type("out_idx") }; + _execute.record_gradient("Unique", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] unique_eager_fallback(Tensor x, TF_DataType out_idx, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype, "out_idx", out_idx }; + var _result = _execute.execute("Unique", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Unique", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Finds unique elements along an axis of a tensor. + /// + /// + /// + /// This operation either returns a tensor `y` containing unique elements + /// along the `axis` of a tensor. The returned unique elements is sorted + /// in the same order as they occur along `axis` in `x`. + /// This operation also returns a tensor `idx` that is the same size as + /// the number of the elements in `x` along the `axis` dimension. It + /// contains the index in the unique output `y`. + /// In other words, for an `1-D` tensor `x` with `axis = None: + /// + /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + /// + /// For example: + /// + /// ``` + /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + /// y, idx = unique(x) + /// y ==> [1, 2, 4, 7, 8] + /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + /// ``` + /// + /// For an `2-D` tensor `x` with `axis = 0`: + /// + /// ``` + /// # tensor 'x' is [[1, 0, 0], + /// # [1, 0, 0], + /// # [2, 0, 0]] + /// y, idx = unique(x, axis=0) + /// y ==> [[1, 0, 0], + /// [2, 0, 0]] + /// idx ==> [0, 0, 1] + /// ``` + /// + /// For an `2-D` tensor `x` with `axis = 1`: + /// + /// ``` + /// # tensor 'x' is [[1, 0, 0], + /// # [1, 0, 0], + /// # [2, 0, 0]] + /// y, idx = unique(x, axis=1) + /// y ==> [[1, 0], + /// [1, 0], + /// [2, 0]] + /// idx ==> [0, 1, 1] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor[] unique_v2(Tensor x, Tensor axis, TF_DataType out_idx = TF_DataType.TF_INT32, string? 
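`UniqueV2` adds an `axis` input so whole rows or columns can be deduplicated. A sketch of the docstring's `axis = 0` case (assumed class name as above):

```
var x = tf.constant(new[,] { { 1, 0, 0 }, { 1, 0, 0 }, { 2, 0, 0 } });
var axis = tf.constant(new[] { 0 });
var outputs = gen_array_ops.unique_v2(x, axis);   // outputs[0] == [[1,0,0],[2,0,0]], outputs[1] == [0, 0, 1]
```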
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UniqueV2", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return unique_v2_eager_fallback(x, axis, out_idx: out_idx, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["axis"] = axis; + keywords["out_idx"] = out_idx; + var _op = tf.OpDefLib._apply_op_helper("UniqueV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Taxis", _op._get_attr_type("Taxis"), "out_idx", _op._get_attr_type("out_idx") }; + _execute.record_gradient("UniqueV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] unique_v2_eager_fallback(Tensor x, Tensor axis, TF_DataType out_idx, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, axis }; + object[] _attrs = new object[] { "T", x.dtype, "Taxis", axis.dtype, "out_idx", out_idx }; + var _result = _execute.execute("UniqueV2", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UniqueV2", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Finds unique elements in a 1-D tensor. + /// + /// + /// + /// This operation returns a tensor `y` containing all of the unique elements of `x` + /// sorted in the same order that they occur in `x`. This operation also returns a + /// tensor `idx` the same size as `x` that contains the index of each value of `x` + /// in the unique output `y`. Finally, it returns a third tensor `count` that + /// contains the count of each element of `y` in `x`. In other words: + /// + /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + /// + /// For example: + /// + /// ``` + /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + /// y, idx, count = unique_with_counts(x) + /// y ==> [1, 2, 4, 7, 8] + /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + /// count ==> [2, 1, 3, 1, 2] + /// ``` + /// + /// + /// + /// + /// + public static Tensor[] unique_with_counts(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string? 
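`UniqueWithCounts` returns three tensors: the unique values, the index map, and the per-value counts. A sketch of the docstring example:

```
var x = tf.constant(new[] { 1, 1, 2, 4, 4, 4, 7, 8, 8 });
var outputs = gen_array_ops.unique_with_counts(x);
var y = outputs[0];       // [1, 2, 4, 7, 8]
var idx = outputs[1];     // [0, 0, 1, 2, 2, 2, 3, 4, 4]
var count = outputs[2];   // [2, 1, 3, 1, 2]
```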
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UniqueWithCounts", name) { args = new object[] { x }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return unique_with_counts_eager_fallback(x, out_idx: out_idx, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["out_idx"] = out_idx; + var _op = tf.OpDefLib._apply_op_helper("UniqueWithCounts", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_idx", _op._get_attr_type("out_idx") }; + _execute.record_gradient("UniqueWithCounts", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] unique_with_counts_eager_fallback(Tensor x, TF_DataType out_idx, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype, "out_idx", out_idx }; + var _result = _execute.execute("UniqueWithCounts", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UniqueWithCounts", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Finds unique elements along an axis of a tensor. + /// + /// + /// + /// This operation either returns a tensor `y` containing unique elements + /// along the `axis` of a tensor. The returned unique elements is sorted + /// in the same order as they occur along `axis` in `x`. + /// This operation also returns a tensor `idx` and a tensor `count` + /// that are the same size as the number of the elements in `x` along the + /// `axis` dimension. The `idx` contains the index in the unique output `y` + /// and the `count` contains the count in the unique output `y`. + /// In other words, for an `1-D` tensor `x` with `axis = None: + /// + /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + /// + /// For example: + /// + /// ``` + /// x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) + /// y, idx, count = UniqueWithCountsV2(x, axis = [0]) + /// y ==> [1, 2, 4, 7, 8] + /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + /// count ==> [2, 1, 3, 1, 2] + /// ``` + /// + /// For a `2-D` tensor `x` with `axis = 0`: + /// + /// ``` + /// x = tf.constant([[1, 0, 0], + /// [1, 0, 0], + /// [2, 0, 0]]) + /// y, idx, count = UniqueWithCountsV2(x, axis=[0]) + /// y ==> [[1, 0, 0], + /// [2, 0, 0]] + /// idx ==> [0, 0, 1] + /// count ==> [2, 1] + /// ``` + /// + /// For a `2-D` tensor `x` with `axis = 1`: + /// + /// ``` + /// x = tf.constant([[1, 0, 0], + /// [1, 0, 0], + /// [2, 0, 0]]) + /// y, idx, count = UniqueWithCountsV2(x, axis=[1]) + /// y ==> [[1, 0], + /// [1, 0], + /// [2, 0]] + /// idx ==> [0, 1, 1] + /// count ==> [1, 2] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor[] unique_with_counts_v2(Tensor x, Tensor axis, TF_DataType out_idx = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UniqueWithCountsV2", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return unique_with_counts_v2_eager_fallback(x, axis, out_idx: out_idx, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["axis"] = axis; + keywords["out_idx"] = out_idx; + var _op = tf.OpDefLib._apply_op_helper("UniqueWithCountsV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Taxis", _op._get_attr_type("Taxis"), "out_idx", _op._get_attr_type("out_idx") }; + _execute.record_gradient("UniqueWithCountsV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] unique_with_counts_v2_eager_fallback(Tensor x, Tensor axis, TF_DataType out_idx, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, axis }; + object[] _attrs = new object[] { "T", x.dtype, "Taxis", axis.dtype, "out_idx", out_idx }; + var _result = _execute.execute("UniqueWithCountsV2", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UniqueWithCountsV2", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. + /// + /// + /// + /// Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. + /// For example, given a tensor of shape `(A, B, C, D)`; + /// + /// If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` + /// and each tensor in `output` will have shape `(B, C, D)`. (Note that the + /// dimension unpacked along is gone, unlike `split`). + /// + /// If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` + /// and each tensor in `output` will have shape `(A, C, D)`. + /// Etc. + /// + /// This is the opposite of `pack`. + /// + /// + /// + /// + /// + /// + /// Dimension along which to unpack. Negative values wrap around, so the + /// valid range is `[-R, R)`. + /// + /// + /// + public static Tensor[] unpack(Tensor value, int num = 0, int axis = 0, string? 
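A small sketch of the `Unpack` wrapper (assumed caller and class as above). Note that `num` is passed explicitly here and should equal the size of `value` along the unpacked `axis`:

```
var value = tf.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } });   // shape (2, 3)
var pieces = gen_array_ops.unpack(value, num: 2, axis: 0);      // two tensors of shape (3): [1,2,3] and [4,5,6]
```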
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Unpack", name) { args = new object[] { value }, attrs = new Dictionary() { ["num"] = num, ["axis"] = axis } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return unpack_eager_fallback(value, num: num, axis: axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["num"] = num; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("Unpack", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num", _op._get_attr_int("num"), "T", _op._get_attr_type("T"), "axis", _op._get_attr_int("axis") }; + _execute.record_gradient("Unpack", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] unpack_eager_fallback(Tensor value, int num, int axis, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value }; + object[] _attrs = new object[] { "num", num, "T", value.dtype, "axis", axis }; + var _result = _execute.execute("Unpack", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Unpack", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Converts an array of flat indices into a tuple of coordinate arrays. + /// + /// + /// + /// + /// Example: + /// + /// ``` + /// y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3]) + /// # 'dims' represent a hypothetical (3, 3) tensor of indices: + /// # [[0, 1, *2*], + /// # [3, 4, *5*], + /// # [6, *7*, 8]] + /// # For each entry from 'indices', this operation returns + /// # its coordinates (marked with '*'), such as + /// # 2 ==> (0, 2) + /// # 5 ==> (1, 2) + /// # 7 ==> (2, 1) + /// y ==> [[0, 1, 2], [2, 2, 1]] + /// ``` + /// + /// @compatibility(numpy) + /// Equivalent to np.unravel_index + /// @end_compatibility + /// + /// + /// + /// + /// + public static Tensor unravel_index(Tensor indices, Tensor dims, string? 
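A sketch of the docstring's example for `UnravelIndex`, converting flat indices into coordinates for a `(3, 3)` grid:

```
var indices = tf.constant(new[] { 2, 5, 7 });
var dims = tf.constant(new[] { 3, 3 });
var coords = gen_array_ops.unravel_index(indices, dims);   // [[0, 1, 2], [2, 2, 1]]
```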
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnravelIndex", name) { args = new object[] { indices, dims }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return unravel_index_eager_fallback(indices, dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["indices"] = indices; + keywords["dims"] = dims; + var _op = tf.OpDefLib._apply_op_helper("UnravelIndex", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("UnravelIndex", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor unravel_index_eager_fallback(Tensor indices, Tensor dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { indices, dims }; + object[] _attrs = new object[] { "Tidx", indices.dtype }; + var _result = _execute.execute("UnravelIndex", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UnravelIndex", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Applies upper_bound(sorted_search_values, values) along each row. + /// + /// + /// + /// Each set of rows with the same index in (sorted_inputs, values) is treated + /// independently. The resulting row is the equivalent of calling + /// `np.searchsorted(sorted_inputs, values, side='right')`. + /// + /// The result is not a global index to the entire + /// `Tensor`, but rather just the index in the last dimension. + /// + /// A 2-D example: + /// sorted_sequence = [[0, 3, 9, 9, 10], + /// [1, 2, 3, 4, 5]] + /// values = [[2, 4, 9], + /// [0, 2, 6]] + /// + /// result = UpperBound(sorted_sequence, values) + /// + /// result == [[1, 2, 4], + /// [0, 2, 5]] + /// + /// + /// + /// + /// + /// + public static Tensor upper_bound(Tensor sorted_inputs, Tensor values, TF_DataType out_type = TF_DataType.TF_INT32, string? 
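`UpperBound` is the row-wise equivalent of `np.searchsorted(..., side='right')`. A sketch of the 2-D example from the docstring, same assumptions as above:

```
var sorted_inputs = tf.constant(new[,] { { 0, 3, 9, 9, 10 }, { 1, 2, 3, 4, 5 } });
var values = tf.constant(new[,] { { 2, 4, 9 }, { 0, 2, 6 } });
var result = gen_array_ops.upper_bound(sorted_inputs, values);   // [[1, 2, 4], [0, 2, 5]]
```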
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UpperBound", name) { args = new object[] { sorted_inputs, values }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return upper_bound_eager_fallback(sorted_inputs, values, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["sorted_inputs"] = sorted_inputs; + keywords["values"] = values; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("UpperBound", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("UpperBound", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor upper_bound_eager_fallback(Tensor sorted_inputs, Tensor values, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { sorted_inputs, values }; + object[] _attrs = new object[] { "T", sorted_inputs.dtype, "out_type", out_type }; + var _result = _execute.execute("UpperBound", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UpperBound", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns locations of nonzero / true values in a tensor. + /// + /// + /// + /// This operation returns the coordinates of true elements in `input`. The + /// coordinates are returned in a 2-D tensor where the first dimension (rows) + /// represents the number of true elements, and the second dimension (columns) + /// represents the coordinates of the true elements. Keep in mind, the shape of + /// the output tensor can vary depending on how many true values there are in + /// `input`. Indices are output in row-major order. + /// + /// For example: + /// + /// ``` + /// # 'input' tensor is [[True, False] + /// # [True, False]] + /// # 'input' has two true values, so output has two coordinates. + /// # 'input' has rank of 2, so coordinates have two indices. + /// where(input) ==> [[0, 0], + /// [1, 0]] + /// + /// # `input` tensor is [[[True, False] + /// # [True, False]] + /// # [[False, True] + /// # [False, True]] + /// # [[False, False] + /// # [False, True]]] + /// # 'input' has 5 true values, so output has 5 coordinates. + /// # 'input' has rank of 3, so coordinates have three indices. + /// where(input) ==> [[0, 0, 0], + /// [0, 1, 0], + /// [1, 0, 1], + /// [1, 1, 1], + /// [2, 1, 1]] + /// + /// # `input` tensor is [[[1.5, 0.0] + /// # [-0.5, 0.0]] + /// # [[0.0, 0.25] + /// # [0.0, 0.75]] + /// # [[0.0, 0.0] + /// # [0.0, 0.01]]] + /// # 'input' has 5 nonzero values, so output has 5 coordinates. + /// # 'input' has rank of 3, so coordinates have three indices. + /// where(input) ==> [[0, 0, 0], + /// [0, 1, 0], + /// [1, 0, 1], + /// [1, 1, 1], + /// [2, 1, 1]] + /// + /// # `input` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j] + /// # [0.0 + 0.5j, 0.0 + 0.0j]] + /// # [[0.0 + 0.0j, 0.25 + 1.5j] + /// # [0.0 + 0.0j, 0.75 + 0.0j]] + /// # [[0.0 + 0.0j, 0.0 + 0.0j] + /// # [0.0 + 0.0j, 0.01 + 0.0j]]] + /// # 'input' has 5 nonzero magnitude values, so output has 5 coordinates. + /// # 'input' has rank of 3, so coordinates have three indices. 
+ /// where(input) ==> [[0, 0, 0], + /// [0, 1, 0], + /// [1, 0, 1], + /// [1, 1, 1], + /// [2, 1, 1]] + /// ``` + /// + /// + /// + /// + public static Tensor where(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Where", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return where_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("Where", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Where", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor where_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("Where", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Where", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a tensor of zeros with the same shape and type as x. + /// + /// + /// + public static Tensor zeros_like(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ZerosLike", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return zeros_like_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("ZerosLike", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("ZerosLike", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor zeros_like_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("ZerosLike", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ZerosLike", _inputs_flat, _attrs, _result); + } + return _result[0]; } } diff --git a/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs b/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs index bb84ac390..5663f9c97 100644 --- a/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs @@ -19,7 +19,7 @@ public static Tensor[] partitioned_call(Tensors args, TF_DataType[] tout, EagerD { try { - return tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("PartitionedCall", name, + return tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "PartitionedCall", name, args, tout, f, config, config_proto, executor_type)); } catch (Exception) @@ -50,7 +50,7 @@ public static Tensor[] partitioned_call(Tensors args, TF_DataType[] tout, EagerD var output = 
tf.OpDefLib._apply_op_helper("PartitionedCall", name, kwargs); var result = output.outputs; - if (execute.must_record_gradient()) + if (_execute.must_record_gradient()) { throw new NotImplementedException(); } @@ -88,7 +88,7 @@ public static Tensor[] symbolic_gradient(Tensor[] input, TF_DataType[] Tout, Nam try { var _result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo( - "SymbolicGradient", name, input, Tout, f)); + tf.Context, "SymbolicGradient", name, input, Tout, f)); return _result; } catch (Exception) @@ -107,7 +107,7 @@ public static Tensor[] symbolic_gradient(Tensor[] input, TF_DataType[] Tout, Nam } var op = tf.OpDefLib._apply_op_helper("SymbolicGradient", name, new object[] { input, Tout, f }); var result = op.outputs; - if (execute.must_record_gradient()) + if (_execute.must_record_gradient()) { throw new NotImplementedException(); } @@ -117,8 +117,8 @@ public static Tensor[] symbolic_gradient(Tensor[] input, TF_DataType[] Tout, Nam public static Tensor[] symbolic_gradient_eager_fallback(Tensor[] input, TF_DataType[] Tout, NameAttrList f, string name, Context ctx) { object[] attrs = new object[] { "Tin", input, "Tout", Tout, "f", f }; - var result = execute.executes("SymbolicGradient", Tout.Length, input, attrs, ctx, name); - if (execute.must_record_gradient()) + var result = _execute.execute("SymbolicGradient", Tout.Length, input, attrs, ctx, name); + if (_execute.must_record_gradient()) { throw new NotImplementedException(); } diff --git a/src/TensorFlowNET.Core/Operations/gen_io_ops.cs b/src/TensorFlowNET.Core/Operations/gen_io_ops.cs new file mode 100644 index 000000000..490cb1880 --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/gen_io_ops.cs @@ -0,0 +1,1378 @@ +/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/ + +using Tensorflow.Eager; +using Tensorflow.Contexts; +using static Tensorflow.Binding; + +namespace Tensorflow; + +internal static class gen_io_ops +{ + public static Tensor fixed_length_record_reader(int header_bytes = 0, int record_bytes = 0, int footer_bytes = 0, int hop_bytes = 0, string container = "", string shared_name = "", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FixedLengthRecordReader", name, "header_bytes", header_bytes, "record_bytes", record_bytes, "footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fixed_length_record_reader_eager_fallback(header_bytes: header_bytes, record_bytes: record_bytes, footer_bytes: footer_bytes, hop_bytes: hop_bytes, container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["header_bytes"] = header_bytes; keywords["record_bytes"] = record_bytes; keywords["footer_bytes"] = footer_bytes; keywords["hop_bytes"] = hop_bytes; keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReader", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "header_bytes", _op._get_attr_int("header_bytes"), "record_bytes", _op._get_attr_int("record_bytes"), "footer_bytes", _op._get_attr_int("footer_bytes"), "hop_bytes", _op._get_attr_int("hop_bytes"), "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("FixedLengthRecordReader", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fixed_length_record_reader_eager_fallback(int header_bytes, int record_bytes, int footer_bytes, int hop_bytes, string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "header_bytes", header_bytes, "record_bytes", record_bytes, "footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container", container, "shared_name", shared_name }; + var _result = _execute.execute("FixedLengthRecordReader", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FixedLengthRecordReader", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor fixed_length_record_reader_v2(int header_bytes = 0, int record_bytes = 0, int footer_bytes = 0, int hop_bytes = 0, string container = "", string shared_name = "", string encoding = "", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FixedLengthRecordReaderV2", name, "header_bytes", header_bytes, "record_bytes", record_bytes, "footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container", container, "shared_name", shared_name, "encoding", encoding)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fixed_length_record_reader_v2_eager_fallback(header_bytes: header_bytes, record_bytes: record_bytes, footer_bytes: footer_bytes, hop_bytes: hop_bytes, container: container, shared_name: shared_name, encoding: encoding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["header_bytes"] = header_bytes; keywords["record_bytes"] = record_bytes; keywords["footer_bytes"] = footer_bytes; keywords["hop_bytes"] = hop_bytes; keywords["container"] = container; keywords["shared_name"] = shared_name; keywords["encoding"] = encoding; var _op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReaderV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "header_bytes", _op._get_attr_int("header_bytes"), "record_bytes", _op._get_attr_int("record_bytes"), "footer_bytes", _op._get_attr_int("footer_bytes"), "hop_bytes", _op._get_attr_int("hop_bytes"), "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name"), "encoding", _op.get_attr("encoding") }; + _execute.record_gradient("FixedLengthRecordReaderV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fixed_length_record_reader_v2_eager_fallback(int header_bytes, int record_bytes, int footer_bytes, int hop_bytes, string container, string shared_name, string encoding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "header_bytes", header_bytes, "record_bytes", record_bytes, "footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container", container, "shared_name", shared_name, "encoding", encoding }; + var _result = _execute.execute("FixedLengthRecordReaderV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FixedLengthRecordReaderV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor identity_reader(string container = "", string shared_name = "", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityReader", name, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return identity_reader_eager_fallback(container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("IdentityReader", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("IdentityReader", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor identity_reader_eager_fallback(string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "container", container, "shared_name", shared_name }; + var _result = _execute.execute("IdentityReader", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IdentityReader", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor identity_reader_v2(string container = "", string shared_name = "", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityReaderV2", name, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return identity_reader_v2_eager_fallback(container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("IdentityReaderV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("IdentityReaderV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor identity_reader_v2_eager_fallback(string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "container", container, "shared_name", shared_name }; + var _result = _execute.execute("IdentityReaderV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IdentityReaderV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor matching_files(Tensor pattern, string? 
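`MatchingFiles` takes a string tensor of glob patterns and returns the matching file names. A minimal sketch; the pattern is hypothetical and, since `gen_io_ops` is declared internal, the call is assumed to come from inside the Tensorflow assembly:

```
var pattern = tf.constant("data/*.csv");          // hypothetical glob pattern
var files = gen_io_ops.matching_files(pattern);   // 1-D string tensor of matched file names
```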
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatchingFiles", name, pattern)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matching_files_eager_fallback(pattern, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["pattern"] = pattern; + var _op = tf.OpDefLib._apply_op_helper("MatchingFiles", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("MatchingFiles", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matching_files_eager_fallback(Tensor pattern, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { pattern }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("MatchingFiles", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatchingFiles", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Operation merge_v2_checkpoints(Tensor checkpoint_prefixes, Tensor destination_prefix, bool delete_old_dirs = true, bool allow_missing_files = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MergeV2Checkpoints", name, checkpoint_prefixes, destination_prefix, "delete_old_dirs", delete_old_dirs, "allow_missing_files", allow_missing_files)); + return null; + } + catch (Exception) + { + } + try + { + return merge_v2_checkpoints_eager_fallback(checkpoint_prefixes, destination_prefix, delete_old_dirs: delete_old_dirs, allow_missing_files: allow_missing_files, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["checkpoint_prefixes"] = checkpoint_prefixes; + keywords["destination_prefix"] = destination_prefix; + keywords["delete_old_dirs"] = delete_old_dirs; keywords["allow_missing_files"] = allow_missing_files; var _op = tf.OpDefLib._apply_op_helper("MergeV2Checkpoints", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "delete_old_dirs", _op._get_attr_bool("delete_old_dirs"), "allow_missing_files", _op._get_attr_bool("allow_missing_files") }; + _execute.record_gradient("MergeV2Checkpoints", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor merge_v2_checkpoints_eager_fallback(Tensor checkpoint_prefixes, Tensor destination_prefix, bool delete_old_dirs, bool allow_missing_files, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { checkpoint_prefixes, destination_prefix }; + object[] _attrs = new object[] { "delete_old_dirs", delete_old_dirs, "allow_missing_files", allow_missing_files }; + var _result = _execute.execute("MergeV2Checkpoints", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MergeV2Checkpoints", _inputs_flat, _attrs, _result); + } + return null; + } + public static Tensor read_file(Tensor filename, string? 
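`ReadFile` returns the raw contents of a file as a scalar string tensor. A sketch under the same assumptions (hypothetical path, internal caller):

```
var filename = tf.constant("images/cat.jpg");     // hypothetical path
var contents = gen_io_ops.read_file(filename);    // scalar string tensor holding the file's bytes
```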
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReadFile", name, filename)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return read_file_eager_fallback(filename, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["filename"] = filename; + var _op = tf.OpDefLib._apply_op_helper("ReadFile", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReadFile", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor read_file_eager_fallback(Tensor filename, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { filename }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReadFile", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReadFile", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor reader_num_records_produced(Tensor reader_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_num_records_produced op does not support eager execution. Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderNumRecordsProduced", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderNumRecordsProduced", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reader_num_records_produced_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + throw new RuntimeError($"reader_num_records_produced op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Tensor reader_num_records_produced_v2(Tensor reader_handle, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderNumRecordsProducedV2", name, reader_handle)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reader_num_records_produced_v2_eager_fallback(reader_handle, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderNumRecordsProducedV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderNumRecordsProducedV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reader_num_records_produced_v2_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderNumRecordsProducedV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderNumRecordsProducedV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor reader_num_work_units_completed(Tensor reader_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_num_work_units_completed op does not support eager execution. Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderNumWorkUnitsCompleted", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderNumWorkUnitsCompleted", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reader_num_work_units_completed_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + throw new RuntimeError($"reader_num_work_units_completed op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Tensor reader_num_work_units_completed_v2(Tensor reader_handle, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderNumWorkUnitsCompletedV2", name, reader_handle)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reader_num_work_units_completed_v2_eager_fallback(reader_handle, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderNumWorkUnitsCompletedV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderNumWorkUnitsCompletedV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reader_num_work_units_completed_v2_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderNumWorkUnitsCompletedV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderNumWorkUnitsCompletedV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor[] reader_read(Tensor reader_handle, Tensor queue_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_read op does not support eager execution. Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + keywords["queue_handle"] = queue_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderRead", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderRead", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] reader_read_eager_fallback(Tensor reader_handle, Tensor queue_handle, string name, Context ctx) + { + throw new RuntimeError($"reader_read op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Tensor[] reader_read_up_to(Tensor reader_handle, Tensor queue_handle, Tensor num_records, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_read_up_to op does not support eager execution. Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + keywords["queue_handle"] = queue_handle; + keywords["num_records"] = num_records; + var _op = tf.OpDefLib._apply_op_helper("ReaderReadUpTo", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderReadUpTo", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] reader_read_up_to_eager_fallback(Tensor reader_handle, Tensor queue_handle, Tensor num_records, string name, Context ctx) + { + throw new RuntimeError($"reader_read_up_to op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Tensor[] reader_read_up_to_v2(Tensor reader_handle, Tensor queue_handle, Tensor num_records, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderReadUpToV2", name, reader_handle, queue_handle, num_records)); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return reader_read_up_to_v2_eager_fallback(reader_handle, queue_handle, num_records, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + keywords["queue_handle"] = queue_handle; + keywords["num_records"] = num_records; + var _op = tf.OpDefLib._apply_op_helper("ReaderReadUpToV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderReadUpToV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] reader_read_up_to_v2_eager_fallback(Tensor reader_handle, Tensor queue_handle, Tensor num_records, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle, queue_handle, num_records }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderReadUpToV2", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderReadUpToV2", _inputs_flat, _attrs, _result); + } + return _result; + } + public static Tensor[] reader_read_v2(Tensor reader_handle, Tensor queue_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderReadV2", name, reader_handle, queue_handle)); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return reader_read_v2_eager_fallback(reader_handle, queue_handle, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + keywords["queue_handle"] = queue_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderReadV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderReadV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] reader_read_v2_eager_fallback(Tensor reader_handle, Tensor queue_handle, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle, queue_handle }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderReadV2", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderReadV2", _inputs_flat, _attrs, _result); + } + return _result; + } + public static Operation reader_reset(Tensor reader_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_reset op does not support eager execution. 
Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderReset", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderReset", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor reader_reset_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + throw new RuntimeError($"reader_reset op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Operation reader_reset_v2(Tensor reader_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderResetV2", name, reader_handle)); + return null; + } + catch (Exception) + { + } + try + { + return reader_reset_v2_eager_fallback(reader_handle, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderResetV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderResetV2", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor reader_reset_v2_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderResetV2", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderResetV2", _inputs_flat, _attrs, _result); + } + return null; + } + public static Operation reader_restore_state(Tensor reader_handle, Tensor state, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_restore_state op does not support eager execution. Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + keywords["state"] = state; + var _op = tf.OpDefLib._apply_op_helper("ReaderRestoreState", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderRestoreState", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor reader_restore_state_eager_fallback(Tensor reader_handle, Tensor state, string name, Context ctx) + { + throw new RuntimeError($"reader_restore_state op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Operation reader_restore_state_v2(Tensor reader_handle, Tensor state, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderRestoreStateV2", name, reader_handle, state)); + return null; + } + catch (Exception) + { + } + try + { + return reader_restore_state_v2_eager_fallback(reader_handle, state, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + keywords["state"] = state; + var _op = tf.OpDefLib._apply_op_helper("ReaderRestoreStateV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderRestoreStateV2", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor reader_restore_state_v2_eager_fallback(Tensor reader_handle, Tensor state, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle, state }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderRestoreStateV2", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderRestoreStateV2", _inputs_flat, _attrs, _result); + } + return null; + } + public static Tensor reader_serialize_state(Tensor reader_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_serialize_state op does not support eager execution. Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderSerializeState", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderSerializeState", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reader_serialize_state_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + throw new RuntimeError($"reader_serialize_state op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Tensor reader_serialize_state_v2(Tensor reader_handle, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderSerializeStateV2", name, reader_handle)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reader_serialize_state_v2_eager_fallback(reader_handle, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderSerializeStateV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderSerializeStateV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reader_serialize_state_v2_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderSerializeStateV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderSerializeStateV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor restore(Tensor file_pattern, Tensor tensor_name, TF_DataType dt, int preferred_shard = -1, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Restore", name, file_pattern, tensor_name, "dt", dt, "preferred_shard", preferred_shard)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return restore_eager_fallback(file_pattern, tensor_name, dt: dt, preferred_shard: preferred_shard, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["file_pattern"] = file_pattern; + keywords["tensor_name"] = tensor_name; + keywords["dt"] = dt; keywords["preferred_shard"] = preferred_shard; var _op = tf.OpDefLib._apply_op_helper("Restore", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dt", _op._get_attr_type("dt"), "preferred_shard", _op._get_attr_int("preferred_shard") }; + _execute.record_gradient("Restore", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor restore_eager_fallback(Tensor file_pattern, Tensor tensor_name, TF_DataType dt, int preferred_shard, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { file_pattern, tensor_name }; + object[] _attrs = new object[] { "dt", dt, "preferred_shard", preferred_shard }; + var _result = _execute.execute("Restore", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Restore", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor restore_slice(Tensor file_pattern, Tensor tensor_name, Tensor shape_and_slice, TF_DataType dt, int preferred_shard = -1, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RestoreSlice", name, file_pattern, tensor_name, shape_and_slice, "dt", dt, "preferred_shard", preferred_shard)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return restore_slice_eager_fallback(file_pattern, tensor_name, shape_and_slice, dt: dt, preferred_shard: preferred_shard, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["file_pattern"] = file_pattern; + keywords["tensor_name"] = tensor_name; + keywords["shape_and_slice"] = shape_and_slice; + keywords["dt"] = dt; keywords["preferred_shard"] = preferred_shard; var _op = tf.OpDefLib._apply_op_helper("RestoreSlice", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dt", _op._get_attr_type("dt"), "preferred_shard", _op._get_attr_int("preferred_shard") }; + _execute.record_gradient("RestoreSlice", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor restore_slice_eager_fallback(Tensor file_pattern, Tensor tensor_name, Tensor shape_and_slice, TF_DataType dt, int preferred_shard, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { file_pattern, tensor_name, shape_and_slice }; + object[] _attrs = new object[] { "dt", dt, "preferred_shard", preferred_shard }; + var _result = _execute.execute("RestoreSlice", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RestoreSlice", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor restore_v2(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RestoreV2", name, prefix, tensor_names, shape_and_slices, "dtypes", dtypes)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return restore_v2_eager_fallback(prefix, tensor_names, shape_and_slices, dtypes: dtypes, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["prefix"] = prefix; + keywords["tensor_names"] = tensor_names; + keywords["shape_and_slices"] = shape_and_slices; + keywords["dtypes"] = dtypes; var _op = tf.OpDefLib._apply_op_helper("RestoreV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtypes", _op.get_attr("dtypes") }; + _execute.record_gradient("RestoreV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor restore_v2_eager_fallback(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { prefix, tensor_names, shape_and_slices }; + object[] _attrs = new object[] { "dtypes", dtypes }; + var _result = _execute.execute("RestoreV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RestoreV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Operation save(Tensor filename, Tensor tensor_names, Tensor data, TF_DataType[] T, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Save", name, filename, tensor_names, data, "T", T)); + return null; + } + catch (Exception) + { + } + try + { + return save_eager_fallback(filename, tensor_names, data, T: T, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["filename"] = filename; + keywords["tensor_names"] = tensor_names; + keywords["data"] = data; + keywords["T"] = T; var _op = tf.OpDefLib._apply_op_helper("Save", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op.get_attr("T") }; + _execute.record_gradient("Save", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor save_eager_fallback(Tensor filename, Tensor tensor_names, Tensor data, TF_DataType[] T, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { filename, tensor_names, data }; + object[] _attrs = new object[] { "T", T }; + var _result = _execute.execute("Save", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Save", _inputs_flat, _attrs, _result); + } + return null; + } + public static Operation save_slices(Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensor data, TF_DataType[] T, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SaveSlices", name, filename, tensor_names, shapes_and_slices, data, "T", T)); + return null; + } + catch (Exception) + { + } + try + { + return save_slices_eager_fallback(filename, tensor_names, shapes_and_slices, data, T: T, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["filename"] = filename; + keywords["tensor_names"] = tensor_names; + keywords["shapes_and_slices"] = shapes_and_slices; + keywords["data"] = data; + keywords["T"] = T; var _op = tf.OpDefLib._apply_op_helper("SaveSlices", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op.get_attr("T") }; + _execute.record_gradient("SaveSlices", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor save_slices_eager_fallback(Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensor data, TF_DataType[] T, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { filename, tensor_names, shapes_and_slices, data }; + object[] _attrs = new object[] { "T", T }; + var _result = _execute.execute("SaveSlices", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SaveSlices", _inputs_flat, _attrs, _result); + } + return null; + } + public static Operation save_v2(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensor tensors, TF_DataType[] dtypes, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SaveV2", name, prefix, tensor_names, shape_and_slices, tensors, "dtypes", dtypes)); + return null; + } + catch (Exception) + { + } + try + { + return save_v2_eager_fallback(prefix, tensor_names, shape_and_slices, tensors, dtypes: dtypes, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["prefix"] = prefix; + keywords["tensor_names"] = tensor_names; + keywords["shape_and_slices"] = shape_and_slices; + keywords["tensors"] = tensors; + keywords["dtypes"] = dtypes; var _op = tf.OpDefLib._apply_op_helper("SaveV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtypes", _op.get_attr("dtypes") }; + _execute.record_gradient("SaveV2", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor save_v2_eager_fallback(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensor tensors, TF_DataType[] dtypes, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { prefix, tensor_names, shape_and_slices, tensors }; + object[] _attrs = new object[] { "dtypes", dtypes }; + var _result = _execute.execute("SaveV2", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SaveV2", _inputs_flat, _attrs, _result); + } + return null; + } + public static Tensor sharded_filename(Tensor basename, Tensor shard, Tensor num_shards, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShardedFilename", name, basename, shard, num_shards)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sharded_filename_eager_fallback(basename, shard, num_shards, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["basename"] = basename; + keywords["shard"] = shard; + keywords["num_shards"] = num_shards; + var _op = tf.OpDefLib._apply_op_helper("ShardedFilename", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ShardedFilename", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sharded_filename_eager_fallback(Tensor basename, Tensor shard, Tensor num_shards, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { basename, shard, num_shards }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ShardedFilename", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ShardedFilename", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor sharded_filespec(Tensor basename, Tensor num_shards, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShardedFilespec", name, basename, num_shards)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sharded_filespec_eager_fallback(basename, num_shards, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["basename"] = basename; + keywords["num_shards"] = num_shards; + var _op = tf.OpDefLib._apply_op_helper("ShardedFilespec", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ShardedFilespec", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sharded_filespec_eager_fallback(Tensor basename, Tensor num_shards, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { basename, num_shards }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ShardedFilespec", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ShardedFilespec", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor text_line_reader(int skip_header_lines = 0, string container = "", string shared_name = "", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TextLineReader", name, "skip_header_lines", skip_header_lines, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return text_line_reader_eager_fallback(skip_header_lines: skip_header_lines, container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["skip_header_lines"] = skip_header_lines; keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("TextLineReader", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "skip_header_lines", _op._get_attr_int("skip_header_lines"), "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("TextLineReader", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor text_line_reader_eager_fallback(int skip_header_lines, string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "skip_header_lines", skip_header_lines, "container", container, "shared_name", shared_name }; + var _result = _execute.execute("TextLineReader", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TextLineReader", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor text_line_reader_v2(int skip_header_lines = 0, string container = "", string shared_name = "", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TextLineReaderV2", name, "skip_header_lines", skip_header_lines, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return text_line_reader_v2_eager_fallback(skip_header_lines: skip_header_lines, container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["skip_header_lines"] = skip_header_lines; keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("TextLineReaderV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "skip_header_lines", _op._get_attr_int("skip_header_lines"), "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("TextLineReaderV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor text_line_reader_v2_eager_fallback(int skip_header_lines, string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "skip_header_lines", skip_header_lines, "container", container, "shared_name", shared_name }; + var _result = _execute.execute("TextLineReaderV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TextLineReaderV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor whole_file_reader(string container = "", string shared_name = "", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WholeFileReader", name, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return whole_file_reader_eager_fallback(container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("WholeFileReader", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("WholeFileReader", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor whole_file_reader_eager_fallback(string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "container", container, "shared_name", shared_name }; + var _result = _execute.execute("WholeFileReader", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("WholeFileReader", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor whole_file_reader_v2(string container = "", string shared_name = "", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WholeFileReaderV2", name, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return whole_file_reader_v2_eager_fallback(container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("WholeFileReaderV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("WholeFileReaderV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor whole_file_reader_v2_eager_fallback(string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "container", container, "shared_name", shared_name }; + var _result = _execute.execute("WholeFileReaderV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("WholeFileReaderV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Operation write_file(Tensor filename, Tensor contents, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WriteFile", name, filename, contents)); + return null; + } + catch (Exception) + { + } + try + { + return write_file_eager_fallback(filename, contents, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["filename"] = filename; + keywords["contents"] = contents; + var _op = tf.OpDefLib._apply_op_helper("WriteFile", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("WriteFile", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor write_file_eager_fallback(Tensor filename, Tensor contents, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { filename, contents }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("WriteFile", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("WriteFile", _inputs_flat, _attrs, _result); + } + return null; + } +} diff --git a/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs b/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs index 03159aaa1..d2907f090 100644 --- a/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs @@ -26,7 +26,7 @@ public static Operation assert(Tensor condition, object[] data, long summarize = if (tf.Context.executing_eagerly()) { var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo( - "Assert", name, + tf.Context, "Assert", name, new object[] { condition, data, summarize })); return results[0]; diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs index 564abbd0f..3456d9b3d 100644 --- 
a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs @@ -1,569 +1,9487 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; +/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/ + +using Tensorflow.Eager; using Tensorflow.Contexts; using static Tensorflow.Binding; -namespace Tensorflow +namespace Tensorflow; + +public static class gen_math_ops { - public static partial class gen_math_ops - { - public static Tensor _all(Tensor input, Tensor axis, bool keep_dims = false, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("All", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims }); - - return _op.outputs[0]; - } - - /// - /// Add all input tensors element wise. - /// - /// - /// - /// - public static Tensor add_n(Tensor[] inputs, string name = null) - => tf.Context.ExecuteOp("AddN", name, new ExecuteOpArgs() - { - OpInputArgs = new object[] { inputs } - }); - - /// - /// Returns the index with the largest value across dimensions of a tensor. - /// - /// - /// - /// - /// - /// - public static Tensor arg_max(Tensor input, Axis dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null) - => tf.Context.ExecuteOp("ArgMax", name, new ExecuteOpArgs(input, dimension) - .SetAttributes(new { output_type })); - - - /// - /// Returns the index with the smallest value across dimensions of a tensor. - /// - /// - /// - /// - /// - /// - public static Tensor arg_min(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null) - => tf.Context.ExecuteOp("ArgMin", name, new ExecuteOpArgs(input, dimension) - .SetAttributes(new { output_type })); - - /// - /// Computes Psi, the derivative of Lgamma (the log of the absolute value of - /// `Gamma(x)`), element-wise. - /// - /// - /// - /// - public static Tensor digamma(Tensor x, string name = null) - => tf.OpDefLib._apply_op_helper("Digamma", name, args: new { x }).output; - - /// - /// Returns 0 if the denominator is zero. - /// - /// - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DivNoNan'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// - /// *NOTE*: DivNoNan supports broadcasting. 
More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - /// - public static Tensor div_no_nan(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("DivNoNan", name, new ExecuteOpArgs(x, y)); - - public static Tensor mean(Tensor input, int axis, bool keep_dims = false, string name = null) - => mean(input, ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name); - - /// - /// Computes the mean of elements across dimensions of a tensor. - /// Reduces `input` along the dimensions given in `axis`. Unless - /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - /// `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1. - /// - /// A `Tensor`. Must be one of the following types: - /// `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. - /// The tensor to reduce. - /// A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce. - /// An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `input`. - public static Tensor mean(Tensor input, Tensor axis, bool keep_dims = false, string name = null) - => tf.Context.ExecuteOp("Mean", name, new ExecuteOpArgs(input, axis) - { - GetGradientAttrs = (op) => new - { - T = op.get_attr("T"), - Tidx = op.get_attr("Tidx"), - keep_dims = op.get_attr("keep_dims") - } - }.SetAttributes(new { keep_dims, reduction_indices = axis })); - - public static Tensor mean(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null) - { - if (tf.Context.executing_eagerly()) - { - return mean_eager_fallback(inputs, axis, keep_dims: keep_dims, name: name, ctx: tf.Context); - } - - var _op = tf.OpDefLib._apply_op_helper("Mean", name, args: new { inputs, reduction_indices = axis, keep_dims = keep_dims }); - - return _op.output; - } - - private static Tensor mean_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null) - { - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs }); - var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new[] { axis }); - var _inputs_flat = input.concat(axis1); - var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx }; - - return tf.Runner.Execute(ctx, "Mean", 1, _inputs_flat, _attrs, name: name)[0]; - } - - public static Tensor prod(T1 input, T2 axis, bool keep_dims = false, string name = null) - => tf.Context.ExecuteOp("Prod", name, - new ExecuteOpArgs(input, axis).SetAttributes(new { keep_dims, reduction_indices = axis })); - - private static Tensor prod_eager_fallback(Tensor input_t, int[] axis, bool keep_dims, string name, Context ctx = null) - { - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { input_t }); - var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new[] { axis }); - var _inputs_flat = input.concat(axis1); - var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx }; - - return tf.Runner.Execute(ctx, "Prod", 1, _inputs_flat, _attrs, name: name)[0]; - } - - public static Tensor acos(Tensor x, string name = null) - => tf.Context.ExecuteOp("Acos", name, new ExecuteOpArgs(x)); - - public static Tensor 
asin(Tensor x, string name = null) - => tf.Context.ExecuteOp("Asin", name, new ExecuteOpArgs(x)); - - public static Tensor add(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("Add", name, new ExecuteOpArgs(x, y)); - - public static Tensor add(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Add", name, new ExecuteOpArgs(x, y)); - - public static Tensor add_v2(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("AddV2", name, new ExecuteOpArgs(x, y)); - - public static Tensor atan(Tensor x, string name = null) - => tf.Context.ExecuteOp("Atan", name, new ExecuteOpArgs(x)); - - public static Tensor ceil(Tensor x, string name = null) - => tf.Context.ExecuteOp("Ceil", name, new ExecuteOpArgs(x)); - - public static Tensor sin(Tensor x, string name = null) - => tf.Context.ExecuteOp("Sin", name, new ExecuteOpArgs(x)); - - /// - /// Computes sigmoid of x element-wise. - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sigmoid'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// Specifically, y = 1 / (1 + exp(-x)). - /// - public static Tensor sigmoid(Tensor x, string name = "Sigmoid") - => tf.Context.ExecuteOp("Sigmoid", name, new ExecuteOpArgs(x)); + /// + /// Computes the absolute value of a tensor. + /// + /// + /// + /// Given a tensor `x`, this operation returns a tensor containing the absolute + /// value of each element in `x`. For example, if x is an input element and y is + /// an output element, this operation computes \(y = |x|\). + /// + /// + /// + /// + public static Tensor abs(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Abs", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return abs_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Abs", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Abs", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor abs_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Abs", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Abs", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the element-wise sum of a list of tensors. + /// + /// + /// + /// `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not + /// wait for all of its inputs to be ready before beginning to sum. This can + /// save memory if inputs are ready at different times, since minimum temporary + /// storage is proportional to the output size rather than the inputs size. + /// + /// Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. + /// + /// Returns a `Tensor` of same shape and type as the elements of `inputs`. + /// + /// + /// + /// + /// + /// Shape of elements of `inputs`. 
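+ // A minimal usage sketch for the wrapper that follows (illustrative only; it assumes eager mode and
+ // that `Tensors` exposes its usual params constructor, as used elsewhere in this code base):
+ //   var a = tf.constant(new[] { 1f, 2f });
+ //   var b = tf.constant(new[] { 3f, 4f });
+ //   var sum = gen_math_ops.accumulate_nv2(new Tensors(a, b), shape: new Shape(2));   // -> [4, 6]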
+ /// + /// + /// + public static Tensor accumulate_nv2(Tensors inputs, Shape shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AccumulateNV2", name) { args = new object[] { inputs }, attrs = new Dictionary() { ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return accumulate_nv2_eager_fallback(inputs, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["inputs"] = inputs; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("AccumulateNV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "shape", _op.get_attr("shape") }; + _execute.record_gradient("AccumulateNV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor accumulate_nv2_eager_fallback(Tensors inputs, Shape shape, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.AddRange(inputs); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", inputs.Length, "T", inputs.dtype, "shape", shape }; + var _result = _execute.execute("AccumulateNV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AccumulateNV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes acos of x element-wise. + /// + /// + /// + /// + /// Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each element of the tensor. If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`. + /// + /// Input range is `[-1, 1]` and the output has a range of `[0, pi]`. + /// + /// + /// + /// + /// + public static Tensor acos(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Acos", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return acos_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Acos", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Acos", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor acos_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Acos", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Acos", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes inverse hyperbolic cosine of x element-wise. + /// + /// + /// + /// Given an input tensor, the function computes inverse hyperbolic cosine of every element. + /// Input range is `[1, inf]`. It returns `nan` if the input lies outside the range. 
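+ // A C# counterpart of the Python snippet below, as a sketch (eager mode assumed;
+ // acosh(x) = ln(x + sqrt(x^2 - 1))):
+ //   var y = gen_math_ops.acosh(tf.constant(new[] { 1f, 2f, 10f }));   // ~ [0, 1.3169579, 2.9932228]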
+ /// + /// ```python + /// x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) + /// tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] + /// ``` + /// + /// + /// + /// + public static Tensor acosh(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Acosh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return acosh_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Acosh", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Acosh", _op.inputs, _attrs, _result); + } + return _result[0]; + } - /// - /// Computes the gradient of the sigmoid of x wrt its input. - /// - /// - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SigmoidGrad'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// Specifically, grad = dy * y * (1 - y), where y = sigmoid(x), and - /// dy is the corresponding input gradient. - /// - public static Tensor sigmoid_grad(Tensor y, Tensor dy, string name = "SigmoidGrad") - => tf.Context.ExecuteOp("SigmoidGrad", name, new ExecuteOpArgs(y, dy)); + public static Tensor acosh_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Acosh", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Acosh", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x + y element-wise. + /// + /// + /// + /// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Given two input tensors, the `tf.add` operation computes the sum for every element in the tensor. + /// + /// Both input and output have a range `(-inf, inf)`. + /// + /// + /// + /// + /// + /// + public static Tensor add(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Add", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return add_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Add", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Add", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor sign(T x, string name = "Sign") - => tf.Context.ExecuteOp("Sign", name, new ExecuteOpArgs(x)); + public static Tensor add_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Add", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Add", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Add all input tensors element wise. + /// + /// + /// + /// Inputs must be of same size and shape. + /// + /// ```python + /// x = [9, 7, 10] + /// tf.math.add_n(x) ==> 26 + /// ``` + /// + /// + /// + /// + public static Tensor add_n(Tensors inputs, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AddN", name) { args = new object[] { inputs }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return add_n_eager_fallback(inputs, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["inputs"] = inputs; + var _op = tf.OpDefLib._apply_op_helper("AddN", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("AddN", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor sinh(Tensor x, string name = null) - => tf.Context.ExecuteOp("Sinh", name, new ExecuteOpArgs(x)); + public static Tensor add_n_eager_fallback(Tensors inputs, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.AddRange(inputs); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", inputs.Length, "T", inputs.dtype }; + var _result = _execute.execute("AddN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AddN", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x + y element-wise. + /// + /// + /// + /// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor add_v2(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AddV2", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return add_v2_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("AddV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("AddV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor cos(T x, string name = null) - => tf.Context.ExecuteOp("Cos", name, new ExecuteOpArgs(x)); + public static Tensor add_v2_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("AddV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AddV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the "logical and" of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor all(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "All", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return all_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("All", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("All", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor cosh(Tensor x, string name = null) - => tf.Context.ExecuteOp("Cosh", name, new ExecuteOpArgs(x)); + public static Tensor all_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("All", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("All", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the argument of a complex number. + /// + /// + /// + /// Given a tensor `input` of complex numbers, this operation returns a tensor of + /// type `float` that is the argument of each element in `input`. All elements in + /// `input` must be complex numbers of the form \(a + bj\), where *a* + /// is the real part and *b* is the imaginary part. + /// + /// The argument returned by this operation is of the form \(atan2(b, a)\). + /// + /// For example: + /// + /// ``` + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.angle(input) ==> [2.0132, 1.056] + /// ``` + /// + /// @compatibility(numpy) + /// Equivalent to np.angle. + /// @end_compatibility + /// + /// + /// + /// + /// + public static Tensor angle(Tensor input, TF_DataType Tout = TF_DataType.TF_FLOAT, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Angle", name) { args = new object[] { input }, attrs = new Dictionary() { ["Tout"] = Tout } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return angle_eager_fallback(input, Tout: Tout, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["Tout"] = Tout; + var _op = tf.OpDefLib._apply_op_helper("Angle", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tout", _op._get_attr_type("Tout") }; + _execute.record_gradient("Angle", _op.inputs, _attrs, _result); + } + return _result[0]; + } - /// - /// Computes the sum along segments of a tensor. 
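+ // The logical-reduction wrappers added around this point (`all` above, `any` below) take the axes as a
+ // reduction_indices tensor. A minimal sketch, assuming eager mode and that tf.constant accepts bool
+ // arrays as it does the other element types:
+ //   var m    = tf.constant(new[,] { { true, false }, { true, true } });
+ //   var rows = gen_math_ops.all(m, tf.constant(1));   // reduce across axis 1 -> [false, true]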
- /// - /// - /// - /// - /// - /// - public static Tensor unsorted_segment_sum(Tensor data, Tensor segment_ids, Tensor num_segments, string name = null) + public static Tensor angle_eager_fallback(Tensor input, TF_DataType Tout, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "Tout", Tout }; + var _result = _execute.execute("Angle", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Angle", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the "logical or" of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor any(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Any", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return any_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("Any", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("UnsortedSegmentSum", name, new { data, segment_ids, num_segments }); - return _op.outputs[0]; + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Any", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor tan(Tensor x, string name = null) - => tf.Context.ExecuteOp("Tan", name, new ExecuteOpArgs(x)); - - public static Tensor tanh(Tensor x, string name = null) - => tf.Context.ExecuteOp("Tanh", name, new ExecuteOpArgs(x)); - - /// - /// Computes the gradient for the tanh of `x` wrt its input. - /// - /// - /// - /// - /// - public static Tensor tanh_grad(Tensor y, Tensor dy, string name = null) - => tf.Context.ExecuteOp("TanhGrad", name, new ExecuteOpArgs(y, dy)); - - public static Tensor floor(Tensor x, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("Floor", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("ClipByValue", name, args: new { t, clip_value_min, clip_value_max }); - - return _op.outputs[0]; - } - - public static Tensor greater(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Greater", name, new ExecuteOpArgs(x, y)); - - /// - /// Computes the log of the absolute value of `Gamma(x)` element-wise. - /// - /// - /// A `Tensor`. 
Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. - /// - /// - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - public static Tensor lgamma(Tensor x, string name = null) - => tf.Context.ExecuteOp("Lgamma", name, new ExecuteOpArgs(x)); - - - public static Tensor greater_equal(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("GreaterEqual", name, new ExecuteOpArgs(x, y)); - - public static Tensor less(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Less", name, new ExecuteOpArgs(x, y)); - - public static Tensor less_equal(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("LessEqual", name, new ExecuteOpArgs(x, y)); - - public static Tensor log1p(Tensor x, string name = null) - => tf.Context.ExecuteOp("Log1p", name, new ExecuteOpArgs(x)); - - public static Tensor logical_and(T x, T y, string name = null) - => tf.Context.ExecuteOp("LogicalAnd", name, new ExecuteOpArgs(x, y)); - - public static Tensor logical_not(Tensor x, string name = null) - => tf.Context.ExecuteOp("LogicalNot", name, new ExecuteOpArgs(x)); - - public static Tensor logical_or(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("LogicalOr", name, new ExecuteOpArgs(x, y)); - - public static Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor") - { - return logical_and( - logical_or(x, y), - logical_not(logical_and(x, y)), - name); - } + public static Tensor any_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("Any", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Any", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of abs(x-y) < tolerance element-wise. + /// + /// + /// + /// + /// + public static Tensor approximate_equal(Tensor x, Tensor y, float tolerance = 1E-05f, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ApproximateEqual", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["tolerance"] = tolerance } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return approximate_equal_eager_fallback(x, y, tolerance: tolerance, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["tolerance"] = tolerance; + var _op = tf.OpDefLib._apply_op_helper("ApproximateEqual", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "tolerance", _op.get_attr("tolerance") }; + _execute.record_gradient("ApproximateEqual", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor squared_difference(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("SquaredDifference", name, new ExecuteOpArgs(x, y)); + public static Tensor approximate_equal_eager_fallback(Tensor x, Tensor y, float tolerance, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype, "tolerance", tolerance }; + var _result = _execute.execute("ApproximateEqual", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ApproximateEqual", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the index with the largest value across dimensions of a tensor. + /// + /// + /// + /// Note that in case of ties the identity of the return value is not guaranteed. + /// + /// Usage: + /// ```python + /// import tensorflow as tf + /// a = [1, 10, 26.9, 2.8, 166.32, 62.3] + /// b = tf.math.argmax(input = a) + /// c = tf.keras.backend.eval(b) + /// # c = 4 + /// # here a[4] = 166.32 which is the largest element of a across axis 0 + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor arg_max(Tensor input, Tensor dimension, TF_DataType output_type = TF_DataType.TF_INT64, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ArgMax", name) { args = new object[] { input, dimension }, attrs = new Dictionary() { ["output_type"] = output_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return arg_max_eager_fallback(input, dimension, output_type: output_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["dimension"] = dimension; + keywords["output_type"] = output_type; + var _op = tf.OpDefLib._apply_op_helper("ArgMax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "output_type", _op._get_attr_type("output_type") }; + _execute.record_gradient("ArgMax", _op.inputs, _attrs, _result); + } + return _result[0]; + } - /// - /// Computes square of x element-wise. - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. - /// A name for the operation (optional). - /// A `Tensor`. 
Has the same type as `x`. - public static Tensor square(Tensor x, string name = null) - => tf.Context.ExecuteOp("Square", name, new ExecuteOpArgs(x)); + public static Tensor arg_max_eager_fallback(Tensor input, Tensor dimension, TF_DataType output_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, dimension }; + object[] _attrs = new object[] { "T", input.dtype, "Tidx", dimension.dtype, "output_type", output_type }; + var _result = _execute.execute("ArgMax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ArgMax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the index with the smallest value across dimensions of a tensor. + /// + /// + /// + /// Note that in case of ties the identity of the return value is not guaranteed. + /// + /// Usage: + /// ```python + /// import tensorflow as tf + /// a = [1, 10, 26.9, 2.8, 166.32, 62.3] + /// b = tf.math.argmin(input = a) + /// c = tf.keras.backend.eval(b) + /// # c = 0 + /// # here a[0] = 1 which is the smallest element of a across axis 0 + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor arg_min(Tensor input, Tensor dimension, TF_DataType output_type = TF_DataType.TF_INT64, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ArgMin", name) { args = new object[] { input, dimension }, attrs = new Dictionary() { ["output_type"] = output_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return arg_min_eager_fallback(input, dimension, output_type: output_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["dimension"] = dimension; + keywords["output_type"] = output_type; + var _op = tf.OpDefLib._apply_op_helper("ArgMin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "output_type", _op._get_attr_type("output_type") }; + _execute.record_gradient("ArgMin", _op.inputs, _attrs, _result); + } + return _result[0]; + } - /// - /// Returns which elements of x are finite. - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. - /// A name for the operation (optional). - /// A `Tensor` of type `bool`. - public static Tensor is_finite(Tensor x, string name = null) - => tf.Context.ExecuteOp("IsFinite", name, new ExecuteOpArgs(x)); + public static Tensor arg_min_eager_fallback(Tensor input, Tensor dimension, TF_DataType output_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, dimension }; + object[] _attrs = new object[] { "T", input.dtype, "Tidx", dimension.dtype, "output_type", output_type }; + var _result = _execute.execute("ArgMin", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ArgMin", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the trignometric inverse sine of x element-wise. + /// + /// + /// + /// The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that + /// if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`. 
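+ // The inverse relationship in C#, as a sketch (eager mode assumed):
+ //   var x = gen_math_ops.asin(tf.constant(0.5f));   // ~ 0.5235988, i.e. sin(0.5235988) ~ 0.5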
+ /// + /// **Note**: The output of `tf.math.asin` will lie within the invertible range + /// of sine, i.e [-pi/2, pi/2]. + /// + /// For example: + /// + /// ```python + /// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + /// x = tf.constant([1.047, 0.785]) + /// y = tf.math.sin(x) # [0.8659266, 0.7068252] + /// + /// tf.math.asin(y) # [1.047, 0.785] = x + /// ``` + /// + /// + /// + /// + /// + public static Tensor asin(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Asin", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return asin_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Asin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Asin", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor is_nan(Tensor x, string name = null) - => tf.Context.ExecuteOp("IsNan", name, new ExecuteOpArgs(x)); - - - /// - /// Computes exponential of x element-wise. \\(y = e^x\\). - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `x`. - public static Tensor exp(Tensor x, string name = null) - => tf.Context.ExecuteOp("Exp", name, new ExecuteOpArgs(x)); - - /// - /// Computes natural logarithm of x element-wise. - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. - /// name: A name for the operation (optional). - /// A `Tensor`. Has the same type as `x`. - public static Tensor log(Tensor x, string name = null) - => tf.Context.ExecuteOp("Log", name, new ExecuteOpArgs(x)); - - public static Tensor softplus(Tensor features, string name = null) - => tf.Context.ExecuteOp("Softplus", name, new ExecuteOpArgs(features)); - - public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate = false, string name = null) - => tf.Context.ExecuteOp("Cast", name, new ExecuteOpArgs(x) - .SetAttributes(new { DstT, Truncate })); - - public static Tensor neg(Tensor x, string name = null) - => tf.Context.ExecuteOp("Neg", name, new ExecuteOpArgs(x)); - - public static Tensor sqrt(Tensor x, string name = null) - => tf.Context.ExecuteOp("Sqrt", name, new ExecuteOpArgs(x)); - - public static Tensor sub(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("Sub", name, new ExecuteOpArgs(x, y)); - - public static Tensor sub(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Sub", name, new ExecuteOpArgs(x, y)); - - /// - /// Returns the truth value of (x == y) element-wise. - /// - /// - /// - /// - /// - public static Tensor equal(Tx x, Ty y, bool incompatible_shape_error = true, string name = null) - => tf.Context.ExecuteOp("Equal", name, new ExecuteOpArgs(x, y) - .SetAttributes(new - { - incompatible_shape_error - })); - - /// - /// Returns the truth value of (x != y) element-wise. - /// - /// The type of the x. - /// The type of the y. - /// The x. - /// The y. - /// The name. 
- /// - public static Tensor not_equal(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("NotEqual", name, new ExecuteOpArgs(x, y)); - - public static Tensor atan2(Tensor y, Tensor x, string name = null) - => tf.Context.ExecuteOp("Atan2", name, new ExecuteOpArgs(y, x)); - - public static Tensor mul(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Mul", name, new ExecuteOpArgs(x, y)); - - public static Tensor mul_no_nan(Tx x, Ty y, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("MulNoNan", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor real_div(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("RealDiv", name, new ExecuteOpArgs(x, y)); - - public static Tensor reciprocal(Tensor x, string name = null) - => tf.Context.ExecuteOp("Reciprocal", name, new ExecuteOpArgs(x)); - - public static Tensor floor_mod(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("FloorMod", name, new ExecuteOpArgs(x, y)); - - public static Tensor floor_div(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("FloorDiv", name, new ExecuteOpArgs(x, y)); - - /// - /// Multiply the matrix "a" by the matrix "b". - /// - /// - /// - /// - /// - /// - /// - public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string name = null) - => tf.Context.ExecuteOp("MatMul", name, new ExecuteOpArgs(a, b) - .SetAttributes(new - { - transpose_a, - transpose_b - })); - - /// - /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. - /// - /// - /// - /// - /// - public static Tensor maximum(T1 x, T2 y, string name = null) - => tf.Context.ExecuteOp("Maximum", name, new ExecuteOpArgs(x, y)); - - public static Tensor minimum(T1 x, T2 y, string name = null) - => tf.Context.ExecuteOp("Minimum", name, new ExecuteOpArgs(x, y)); - - public static Tensor _abs(Tensor x, string name = null) - => tf.Context.ExecuteOp("Abs", name, new ExecuteOpArgs(x)); - - public static Tensor _any(Tx input, Ty axis, bool keep_dims = false, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("Any", name, new { input, reduction_indices = axis, keep_dims }); - - return _op.outputs[0]; - } - - public static Tensor _max(Tx input, Ty axis, bool keep_dims = false, string name = null) - => tf.Context.ExecuteOp("Max", name, new ExecuteOpArgs(input, axis) - { - GetGradientAttrs = (op) => new - { - T = op.get_attr("T"), - keep_dims = op.get_attr("keep_dims"), - Tidx = op.get_attr("Tidx") - } - }.SetAttributes(new { keep_dims, reduction_indices = axis })); - - public static Tensor _min(Tx input, Ty axis, bool keep_dims = false, string name = null) - => tf.Context.ExecuteOp("Min", name, new ExecuteOpArgs(input, axis) - { - GetGradientAttrs = (op) => new - { - T = op.get_attr("T"), - keep_dims = op.get_attr("keep_dims"), - Tidx = op.get_attr("Tidx") - } - }.SetAttributes(new { keep_dims, reduction_indices = axis })); - - public static Tensor pow(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Pow", name, new ExecuteOpArgs(x, y)); - - public static Tensor _sum(Tx input, Ty axis = default, bool keep_dims = false, string name = null) - => tf.Context.ExecuteOp("Sum", name, - new ExecuteOpArgs(input, axis).SetAttributes(new { keep_dims, reduction_indices = axis })); - - private static Tensor _sum_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null) - { - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs 
}); - var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, tf.int32, new[] { axis }); - var _inputs_flat = input.concat(axis1); - var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx }; - - return tf.Runner.Execute(ctx, "Sum", 1, _inputs_flat, _attrs, name: name)[0]; - } - - /// - /// Creates a sequence of numbers. - /// - /// - /// - /// - /// - /// - public static Tensor range(Tensor start, Tensor limit, Tensor delta, string name = null) - => tf.Context.ExecuteOp("Range", name, new ExecuteOpArgs(start, limit, delta)); - - /// - /// Rounds the values of a tensor to the nearest integer, element-wise. - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Round'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// Rounds half to even. Also known as bankers rounding. If you want to round - /// according to the current system rounding mode use std::cint. - /// - public static Tensor round(Tensor x, string name = "Round") - => tf.Context.ExecuteOp("Round", name, new ExecuteOpArgs(x)); - - /// - /// Computes reciprocal of square root of x element-wise. - /// - /// - /// - /// - public static Tensor rsqrt(Tensor x, string name = null) - => tf.Context.ExecuteOp("Rsqrt", name, new ExecuteOpArgs(x)); - - /// - /// Returns the fraction of zeros in value. - /// - /// A tensor of numeric type. - /// A name for the operation (optional). - /// The fraction of zeros in value, with type float32. - public static Tensor zero_fraction(Tensor value, string name = null) - => tf.Context.ExecuteOp("zero_fraction", name, new ExecuteOpArgs(value)); + public static Tensor asin_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Asin", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Asin", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes inverse hyperbolic sine of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes inverse hyperbolic sine + /// for every element in the tensor. Both input and output has a range of + /// `[-inf, inf]`. + /// + /// ```python + /// x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")]) + /// tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf] + /// ``` + /// + /// + /// + /// + public static Tensor asinh(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Asinh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return asinh_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Asinh", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Asinh", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor asinh_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Asinh", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Asinh", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the trignometric inverse tangent of x element-wise. + /// + /// + /// + /// The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that + /// if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`. + /// + /// **Note**: The output of `tf.math.atan` will lie within the invertible range + /// of tan, i.e (-pi/2, pi/2). + /// + /// For example: + /// + /// ```python + /// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + /// x = tf.constant([1.047, 0.785]) + /// y = tf.math.tan(x) # [1.731261, 0.99920404] + /// + /// tf.math.atan(y) # [1.047, 0.785] = x + /// ``` + /// + /// + /// + /// + /// + public static Tensor atan(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Atan", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return atan_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Atan", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Atan", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor atan_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Atan", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Atan", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes arctangent of `y/x` element-wise, respecting signs of the arguments. + /// + /// + /// + /// This is the angle \( heta in [-pi, pi] \) such that + /// \[ x = r cos( heta) \] + /// and + /// \[ y = r sin( heta) \] + /// where \(r = sqrt{x^2 + y^2} \). + /// + /// For example: + /// + /// >>> x = [1., 1.] + /// >>> y = [1., -1.] + /// >>> print((tf.math.atan2(y,x) * (180 / np.pi)).numpy()) + /// [ 45. -45.] 
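+    ///
+    /// A minimal C# usage sketch of the wrapper defined just below (assuming it is exposed on the
+    /// generated `gen_math_ops` class, and that `tf.constant` accepts float arrays as it does
+    /// elsewhere in TensorFlow.NET); same inputs as the doctest above, result in radians:
+    ///
+    /// ```csharp
+    /// var y = tf.constant(new float[] { 1f, -1f });
+    /// var x = tf.constant(new float[] { 1f, 1f });
+    /// var angle = gen_math_ops.atan2(y, x);   // element-wise atan2(y, x): [0.785, -0.785] ~= [45deg, -45deg]
+    /// ```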
+ /// + /// + /// + /// + /// + /// + /// + public static Tensor atan2(Tensor y, Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Atan2", name) { args = new object[] { y, x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return atan2_eager_fallback(y, x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Atan2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Atan2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor atan2_eager_fallback(Tensor y, Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, x }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("Atan2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Atan2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes inverse hyperbolic tangent of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes inverse hyperbolic tangent + /// for every element in the tensor. Input range is `[-1,1]` and output range is + /// `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the + /// input is `1`, output will be `inf`. Values outside the range will have + /// `nan` as output. + /// + /// ```python + /// x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) + /// tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] + /// ``` + /// + /// + /// + /// + public static Tensor atanh(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Atanh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return atanh_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Atanh", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Atanh", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor atanh_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Atanh", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Atanh", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Multiplies slices of two tensors in batches. + /// + /// + /// + /// Multiplies all slices of `Tensor` `x` and `y` (each slice can be + /// viewed as an element of a batch), and arranges the individual results + /// in a single output tensor of the same batch size. 
Each of the + /// individual slices can optionally be adjointed (to adjoint a matrix + /// means to transpose and conjugate it) before multiplication by setting + /// the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + /// + /// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + /// and `[..., r_y, c_y]`. + /// + /// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + /// + /// r_o = c_x if adj_x else r_x + /// c_o = r_y if adj_y else c_y + /// + /// It is computed as: + /// + /// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + /// + /// + /// + /// + /// + /// + /// If `True`, adjoint the slices of `x`. Defaults to `False`. + /// + /// + /// + /// + /// If `True`, adjoint the slices of `y`. Defaults to `False`. + /// + /// + /// + public static Tensor batch_mat_mul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatMul", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["adj_x"] = adj_x, ["adj_y"] = adj_y } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_mat_mul_eager_fallback(x, y, adj_x: adj_x, adj_y: adj_y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["adj_x"] = adj_x; + keywords["adj_y"] = adj_y; + var _op = tf.OpDefLib._apply_op_helper("BatchMatMul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "adj_x", _op._get_attr_bool("adj_x"), "adj_y", _op._get_attr_bool("adj_y") }; + _execute.record_gradient("BatchMatMul", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor batch_mat_mul_eager_fallback(Tensor x, Tensor y, bool adj_x, bool adj_y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype, "adj_x", adj_x, "adj_y", adj_y }; + var _result = _execute.execute("BatchMatMul", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchMatMul", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Multiplies slices of two tensors in batches. + /// + /// + /// + /// Multiplies all slices of `Tensor` `x` and `y` (each slice can be + /// viewed as an element of a batch), and arranges the individual results + /// in a single output tensor of the same batch size. Each of the + /// individual slices can optionally be adjointed (to adjoint a matrix + /// means to transpose and conjugate it) before multiplication by setting + /// the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + /// + /// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + /// and `[..., r_y, c_y]`. + /// + /// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + /// + /// r_o = c_x if adj_x else r_x + /// c_o = r_y if adj_y else c_y + /// + /// It is computed as: + /// + /// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + /// + /// *NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. 
More + /// about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + /// + /// + /// + /// + /// + /// + /// + /// If `True`, adjoint the slices of `x`. Defaults to `False`. + /// + /// + /// + /// + /// If `True`, adjoint the slices of `y`. Defaults to `False`. + /// + /// + /// + public static Tensor batch_mat_mul_v2(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatMulV2", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["adj_x"] = adj_x, ["adj_y"] = adj_y } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_mat_mul_v2_eager_fallback(x, y, adj_x: adj_x, adj_y: adj_y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["adj_x"] = adj_x; + keywords["adj_y"] = adj_y; + var _op = tf.OpDefLib._apply_op_helper("BatchMatMulV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "adj_x", _op._get_attr_bool("adj_x"), "adj_y", _op._get_attr_bool("adj_y") }; + _execute.record_gradient("BatchMatMulV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor batch_mat_mul_v2_eager_fallback(Tensor x, Tensor y, bool adj_x, bool adj_y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype, "adj_x", adj_x, "adj_y", adj_y }; + var _result = _execute.execute("BatchMatMulV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchMatMulV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Multiplies slices of two tensors in batches. + /// + /// + /// + /// Multiplies all slices of `Tensor` `x` and `y` (each slice can be + /// viewed as an element of a batch), and arranges the individual results + /// in a single output tensor of the same batch size. Each of the + /// individual slices can optionally be adjointed (to adjoint a matrix + /// means to transpose and conjugate it) before multiplication by setting + /// the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + /// + /// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + /// and `[..., r_y, c_y]`. + /// + /// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + /// + /// r_o = c_x if adj_x else r_x + /// c_o = r_y if adj_y else c_y + /// + /// It is computed as: + /// + /// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + /// + /// *NOTE*: `BatchMatMulV3` supports broadcasting in the batch dimensions. More + /// about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + /// + /// + /// + /// + /// + /// + /// + /// If not spcified, Tout is the same type to input type. + /// + /// + /// + /// + /// If `True`, adjoint the slices of `x`. Defaults to `False`. + /// + /// + /// + /// + /// If `True`, adjoint the slices of `y`. Defaults to `False`. + /// + /// + /// + public static Tensor batch_mat_mul_v3(Tensor x, Tensor y, TF_DataType Tout, bool adj_x = false, bool adj_y = false, string? 
name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatMulV3", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout, ["adj_x"] = adj_x, ["adj_y"] = adj_y } });
+                return _fast_path_result[0];
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return batch_mat_mul_v3_eager_fallback(x, y, Tout: Tout, adj_x: adj_x, adj_y: adj_y, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["x"] = x;
+        keywords["y"] = y;
+        keywords["Tout"] = Tout;
+        keywords["adj_x"] = adj_x;
+        keywords["adj_y"] = adj_y;
+        var _op = tf.OpDefLib._apply_op_helper("BatchMatMulV3", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "Ta", _op._get_attr_type("Ta"), "Tb", _op._get_attr_type("Tb"), "Tout", _op._get_attr_type("Tout"), "adj_x", _op._get_attr_bool("adj_x"), "adj_y", _op._get_attr_bool("adj_y") };
+            _execute.record_gradient("BatchMatMulV3", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor batch_mat_mul_v3_eager_fallback(Tensor x, Tensor y, TF_DataType Tout, bool adj_x, bool adj_y, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { x, y };
+        object[] _attrs = new object[] { "Ta", x.dtype, "Tb", y.dtype, "Tout", Tout, "adj_x", adj_x, "adj_y", adj_y };
+        var _result = _execute.execute("BatchMatMulV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("BatchMatMulV3", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    ///
+    /// Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
+    ///
+    ///
+    ///
+    /// The regularized incomplete beta integral is defined as:
+    ///
+    ///
+    /// \(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\)
+    ///
+    /// where
+    ///
+    ///
+    /// \(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\)
+    ///
+    ///
+    /// is the incomplete beta function and \(B(a, b)\) is the *complete*
+    /// beta function.
+    ///
+    ///
+    ///
+    ///
+    ///
+    ///
+    public static Tensor betainc(Tensor a, Tensor b, Tensor x, string?
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Betainc", name) { args = new object[] { a, b, x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return betainc_eager_fallback(a, b, x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Betainc", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Betainc", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor betainc_eager_fallback(Tensor a, Tensor b, Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b, x }; + object[] _attrs = new object[] { "T", a.dtype }; + var _result = _execute.execute("Betainc", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Betainc", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Counts the number of occurrences of each value in an integer array. + /// + /// + /// + /// Outputs a vector with length `size` and the same dtype as `weights`. If + /// `weights` are empty, then index `i` stores the number of times the value `i` is + /// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + /// the value in `weights` at each index where the corresponding value in `arr` is + /// `i`. + /// + /// Values in `arr` outside of the range [0, size) are ignored. + /// + /// + /// + /// + /// + /// + public static Tensor bincount(Tensor arr, Tensor size, Tensor weights, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bincount", name) { args = new object[] { arr, size, weights }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return bincount_eager_fallback(arr, size, weights, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["arr"] = arr; + keywords["size"] = size; + keywords["weights"] = weights; + var _op = tf.OpDefLib._apply_op_helper("Bincount", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Bincount", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor bincount_eager_fallback(Tensor arr, Tensor size, Tensor weights, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { arr, size, weights }; + object[] _attrs = new object[] { "T", weights.dtype }; + var _result = _execute.execute("Bincount", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Bincount", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Bucketizes 'input' based on 'boundaries'. 
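+    ///
+    /// A minimal C# usage sketch (assuming the wrapper defined below is exposed on the generated
+    /// `gen_math_ops` class); the worked example that follows shows the same op on a 2-D input:
+    ///
+    /// ```csharp
+    /// var values = tf.constant(new float[] { -5f, 5f, 150f });
+    /// var buckets = gen_math_ops.bucketize(values, new float[] { 0f, 10f, 100f });
+    /// // buckets ==> [0, 1, 3]   (for each value, the count of boundaries <= that value)
+    /// ```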
+ /// + /// + /// + /// For example, if the inputs are + /// boundaries = [0, 10, 100] + /// input = [[-5, 10000] + /// [150, 10] + /// [5, 100]] + /// + /// then the output will be + /// output = [[0, 3] + /// [3, 2] + /// [1, 3]] + /// + /// + /// + /// + /// + /// A sorted list of floats gives the boundary of the buckets. + /// + /// + /// + public static Tensor bucketize(Tensor input, float[] boundaries, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bucketize", name) { args = new object[] { input }, attrs = new Dictionary() { ["boundaries"] = boundaries } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return bucketize_eager_fallback(input, boundaries: boundaries, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["boundaries"] = boundaries; + var _op = tf.OpDefLib._apply_op_helper("Bucketize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "boundaries", _op.get_attr("boundaries") }; + _execute.record_gradient("Bucketize", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor bucketize_eager_fallback(Tensor input, float[] boundaries, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "boundaries", boundaries }; + var _result = _execute.execute("Bucketize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Bucketize", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Cast x of type SrcT to y of DstT. + /// + /// + /// + /// + /// + public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cast", name) { args = new object[] { x }, attrs = new Dictionary() { ["DstT"] = DstT, ["Truncate"] = Truncate } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cast_eager_fallback(x, DstT: DstT, Truncate: Truncate, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["DstT"] = DstT; + keywords["Truncate"] = Truncate; + var _op = tf.OpDefLib._apply_op_helper("Cast", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "SrcT", _op._get_attr_type("SrcT"), "DstT", _op._get_attr_type("DstT"), "Truncate", _op._get_attr_bool("Truncate") }; + _execute.record_gradient("Cast", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cast_eager_fallback(Tensor x, TF_DataType DstT, bool Truncate, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "SrcT", x.dtype, "DstT", DstT, "Truncate", Truncate }; + var _result = _execute.execute("Cast", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Cast", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns element-wise smallest integer not less than x. + /// + /// + /// + public static Tensor ceil(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Ceil", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return ceil_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Ceil", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Ceil", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor ceil_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Ceil", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Ceil", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Clips tensor values to a specified min and max. + /// + /// + /// + /// Given a tensor `t`, this operation returns a tensor of the same type and + /// shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. + /// Any values less than `clip_value_min` are set to `clip_value_min`. Any values + /// greater than `clip_value_max` are set to `clip_value_max`. + /// + /// + /// + /// + /// + /// + public static Tensor clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ClipByValue", name) { args = new object[] { t, clip_value_min, clip_value_max }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return clip_by_value_eager_fallback(t, clip_value_min, clip_value_max, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["t"] = t; + keywords["clip_value_min"] = clip_value_min; + keywords["clip_value_max"] = clip_value_max; + var _op = tf.OpDefLib._apply_op_helper("ClipByValue", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("ClipByValue", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor clip_by_value_eager_fallback(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { t, clip_value_min, clip_value_max }; + object[] _attrs = new object[] { "T", t.dtype }; + var _result = _execute.execute("ClipByValue", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ClipByValue", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Converts two real numbers to a complex number. + /// + /// + /// + /// Given a tensor `real` representing the real part of a complex number, and a + /// tensor `imag` representing the imaginary part of a complex number, this + /// operation returns complex numbers elementwise of the form \(a + bj\), where + /// *a* represents the `real` part and *b* represents the `imag` part. + /// + /// The input tensors `real` and `imag` must have the same shape. + /// + /// For example: + /// + /// ``` + /// # tensor 'real' is [2.25, 3.25] + /// # tensor `imag` is [4.75, 5.75] + /// tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor complex(Tensor real, Tensor imag, TF_DataType Tout = TF_DataType.TF_COMPLEX64, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Complex", name) { args = new object[] { real, imag }, attrs = new Dictionary() { ["Tout"] = Tout } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return complex_eager_fallback(real, imag, Tout: Tout, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["real"] = real; + keywords["imag"] = imag; + keywords["Tout"] = Tout; + var _op = tf.OpDefLib._apply_op_helper("Complex", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tout", _op._get_attr_type("Tout") }; + _execute.record_gradient("Complex", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor complex_eager_fallback(Tensor real, Tensor imag, TF_DataType Tout, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { real, imag }; + object[] _attrs = new object[] { "T", real.dtype, "Tout", Tout }; + var _result = _execute.execute("Complex", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Complex", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the complex absolute value of a tensor. + /// + /// + /// + /// Given a tensor `x` of complex numbers, this operation returns a tensor of type + /// `float` or `double` that is the absolute value of each element in `x`. All + /// elements in `x` must be complex numbers of the form \(a + bj\). The absolute + /// value is computed as \( sqrt{a^2 + b^2}\). + /// + /// For example: + /// + /// >>> x = tf.complex(3.0, 4.0) + /// >>> print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy()) + /// 5.0 + /// + /// + /// + /// + /// + /// + public static Tensor complex_abs(Tensor x, TF_DataType Tout = TF_DataType.TF_FLOAT, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ComplexAbs", name) { args = new object[] { x }, attrs = new Dictionary() { ["Tout"] = Tout } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return complex_abs_eager_fallback(x, Tout: Tout, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["Tout"] = Tout; + var _op = tf.OpDefLib._apply_op_helper("ComplexAbs", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tout", _op._get_attr_type("Tout") }; + _execute.record_gradient("ComplexAbs", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor complex_abs_eager_fallback(Tensor x, TF_DataType Tout, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype, "Tout", Tout }; + var _result = _execute.execute("ComplexAbs", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ComplexAbs", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the complex conjugate of a complex number. 
+ /// + /// + /// + /// Given a tensor `input` of complex numbers, this operation returns a tensor of + /// complex numbers that are the complex conjugate of each element in `input`. The + /// complex numbers in `input` must be of the form \(a + bj\), where *a* is the + /// real part and *b* is the imaginary part. + /// + /// The complex conjugate returned by this operation is of the form \(a - bj\). + /// + /// For example: + /// + /// ``` + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] + /// ``` + /// + /// + /// + /// + public static Tensor conj(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conj", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conj_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("Conj", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Conj", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conj_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("Conj", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conj", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes cos of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes cosine of every + /// element in the tensor. Input range is `(-inf, inf)` and + /// output range is `[-1,1]`. If input lies outside the boundary, `nan` + /// is returned. + /// + /// ```python + /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) + /// tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan] + /// ``` + /// + /// + /// + /// + public static Tensor cos(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cos", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cos_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Cos", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Cos", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cos_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Cos", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Cos", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes hyperbolic cosine of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes hyperbolic cosine of every + /// element in the tensor. Input range is `[-inf, inf]` and output range + /// is `[1, inf]`. + /// + /// ```python + /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + /// tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf] + /// ``` + /// + /// + /// + /// + public static Tensor cosh(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cosh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cosh_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Cosh", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Cosh", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cosh_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Cosh", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Cosh", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the pairwise cross product. + /// + /// + /// + /// `a` and `b` must be the same shape; they can either be simple 3-element vectors, + /// or any shape where the innermost dimension is 3. In the latter case, each pair + /// of corresponding 3-element vectors is cross-multiplied independently. + /// + /// + /// + /// + /// + public static Tensor cross(Tensor a, Tensor b, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cross", name) { args = new object[] { a, b }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cross_eager_fallback(a, b, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + var _op = tf.OpDefLib._apply_op_helper("Cross", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Cross", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cross_eager_fallback(Tensor a, Tensor b, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b }; + object[] _attrs = new object[] { "T", a.dtype }; + var _result = _execute.execute("Cross", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Cross", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the cumulative product of the tensor `x` along `axis`. + /// + /// + /// + /// By default, this op performs an inclusive cumprod, which means that the first + /// element of the input is identical to the first element of the output: + /// + /// ```python + /// tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] + /// ``` + /// + /// By setting the `exclusive` kwarg to `True`, an exclusive cumprod is + /// performed instead: + /// + /// ```python + /// tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] + /// ``` + /// + /// By setting the `reverse` kwarg to `True`, the cumprod is performed in the + /// opposite direction: + /// + /// ```python + /// tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] + /// ``` + /// + /// This is more efficient than using separate `tf.reverse` ops. + /// + /// The `reverse` and `exclusive` kwargs can also be combined: + /// + /// ```python + /// tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] + /// ``` + /// + /// + /// + /// + /// + /// + /// If `True`, perform exclusive cumprod. + /// + /// + /// + /// + /// A `bool` (default: False). + /// + /// + /// + public static Tensor cumprod(Tensor x, Tensor axis, bool exclusive = false, bool reverse = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cumprod", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["exclusive"] = exclusive, ["reverse"] = reverse } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cumprod_eager_fallback(x, axis, exclusive: exclusive, reverse: reverse, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["axis"] = axis; + keywords["exclusive"] = exclusive; + keywords["reverse"] = reverse; + var _op = tf.OpDefLib._apply_op_helper("Cumprod", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "exclusive", _op._get_attr_bool("exclusive"), "reverse", _op._get_attr_bool("reverse"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Cumprod", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cumprod_eager_fallback(Tensor x, Tensor axis, bool exclusive, bool reverse, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, axis }; + object[] _attrs = new object[] { "exclusive", exclusive, "reverse", reverse, "T", x.dtype, "Tidx", axis.dtype }; + var _result = _execute.execute("Cumprod", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Cumprod", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the cumulative sum of the tensor `x` along `axis`. + /// + /// + /// + /// By default, this op performs an inclusive cumsum, which means that the first + /// element of the input is identical to the first element of the output: + /// + /// ```python + /// tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] + /// ``` + /// + /// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is + /// performed instead: + /// + /// ```python + /// tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] + /// ``` + /// + /// By setting the `reverse` kwarg to `True`, the cumsum is performed in the + /// opposite direction: + /// + /// ```python + /// tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] + /// ``` + /// + /// This is more efficient than using separate `tf.reverse` ops. + /// + /// The `reverse` and `exclusive` kwargs can also be combined: + /// + /// ```python + /// tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] + /// ``` + /// + /// + /// + /// + /// + /// + /// If `True`, perform exclusive cumsum. + /// + /// + /// + /// + /// A `bool` (default: False). + /// + /// + /// + public static Tensor cumsum(Tensor x, Tensor axis, bool exclusive = false, bool reverse = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cumsum", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["exclusive"] = exclusive, ["reverse"] = reverse } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cumsum_eager_fallback(x, axis, exclusive: exclusive, reverse: reverse, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["axis"] = axis; + keywords["exclusive"] = exclusive; + keywords["reverse"] = reverse; + var _op = tf.OpDefLib._apply_op_helper("Cumsum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "exclusive", _op._get_attr_bool("exclusive"), "reverse", _op._get_attr_bool("reverse"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Cumsum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cumsum_eager_fallback(Tensor x, Tensor axis, bool exclusive, bool reverse, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, axis }; + object[] _attrs = new object[] { "exclusive", exclusive, "reverse", reverse, "T", x.dtype, "Tidx", axis.dtype }; + var _result = _execute.execute("Cumsum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Cumsum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the cumulative product of the tensor `x` along `axis`. + /// + /// + /// + /// By default, this op performs an inclusive cumulative log-sum-exp, + /// which means that the first + /// element of the input is identical to the first element of the output: + /// ```python + /// tf.math.cumulative_logsumexp([a, b, c]) # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))] + /// ``` + /// + /// By setting the `exclusive` kwarg to `True`, an exclusive cumulative log-sum-exp is + /// performed instead: + /// ```python + /// tf.cumulative_logsumexp([a, b, c], exclusive=True) # => [-inf, a, log(exp(a) * exp(b))] + /// ``` + /// Note that the neutral element of the log-sum-exp operation is `-inf`, + /// however, for performance reasons, the minimal value representable by the + /// floating point type is used instead. + /// + /// By setting the `reverse` kwarg to `True`, the cumulative log-sum-exp is performed in the + /// opposite direction. + /// + /// + /// + /// + /// + /// + /// If `True`, perform exclusive cumulative log-sum-exp. + /// + /// + /// + /// + /// A `bool` (default: False). + /// + /// + /// + public static Tensor cumulative_logsumexp(Tensor x, Tensor axis, bool exclusive = false, bool reverse = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CumulativeLogsumexp", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["exclusive"] = exclusive, ["reverse"] = reverse } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cumulative_logsumexp_eager_fallback(x, axis, exclusive: exclusive, reverse: reverse, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["axis"] = axis; + keywords["exclusive"] = exclusive; + keywords["reverse"] = reverse; + var _op = tf.OpDefLib._apply_op_helper("CumulativeLogsumexp", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "exclusive", _op._get_attr_bool("exclusive"), "reverse", _op._get_attr_bool("reverse"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("CumulativeLogsumexp", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cumulative_logsumexp_eager_fallback(Tensor x, Tensor axis, bool exclusive, bool reverse, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, axis }; + object[] _attrs = new object[] { "exclusive", exclusive, "reverse", reverse, "T", x.dtype, "Tidx", axis.dtype }; + var _result = _execute.execute("CumulativeLogsumexp", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("CumulativeLogsumexp", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Counts the number of occurrences of each value in an integer array. + /// + /// + /// + /// Outputs a vector with length `size` and the same dtype as `weights`. If + /// `weights` are empty, then index `i` stores the number of times the value `i` is + /// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + /// the value in `weights` at each index where the corresponding value in `arr` is + /// `i`. + /// + /// Values in `arr` outside of the range [0, size) are ignored. + /// + /// + /// + /// + /// + /// + /// + /// bool; Whether the kernel should count the appearance or number of occurrences. + /// + /// + /// + public static Tensor dense_bincount(Tensor input, Tensor size, Tensor weights, bool binary_output = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DenseBincount", name) { args = new object[] { input, size, weights }, attrs = new Dictionary() { ["binary_output"] = binary_output } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return dense_bincount_eager_fallback(input, size, weights, binary_output: binary_output, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["size"] = size; + keywords["weights"] = weights; + keywords["binary_output"] = binary_output; + var _op = tf.OpDefLib._apply_op_helper("DenseBincount", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tidx", _op._get_attr_type("Tidx"), "T", _op._get_attr_type("T"), "binary_output", _op._get_attr_bool("binary_output") }; + _execute.record_gradient("DenseBincount", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor dense_bincount_eager_fallback(Tensor input, Tensor size, Tensor weights, bool binary_output, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, size, weights }; + object[] _attrs = new object[] { "Tidx", input.dtype, "T", weights.dtype, "binary_output", binary_output }; + var _result = _execute.execute("DenseBincount", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DenseBincount", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes Psi, the derivative of Lgamma (the log of the absolute value of + /// + /// + /// + /// `Gamma(x)`), element-wise. + /// + /// + /// + /// + public static Tensor digamma(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Digamma", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return digamma_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Digamma", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Digamma", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor digamma_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Digamma", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Digamma", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x / y element-wise. + /// + /// + /// + /// *NOTE*: `Div` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor div(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Div", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return div_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Div", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Div", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor div_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Div", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Div", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns 0 if the denominator is zero. + /// + /// + /// + /// + /// *NOTE*: `DivNoNan` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor div_no_nan(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DivNoNan", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return div_no_nan_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("DivNoNan", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("DivNoNan", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor div_no_nan_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("DivNoNan", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DivNoNan", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of (x == y) element-wise. + /// + /// + /// + /// *NOTE*: `Equal` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// ```python + /// x = tf.constant([2, 4]) + /// y = tf.constant(2) + /// tf.math.equal(x, y) ==> array([True, False]) + /// + /// x = tf.constant([2, 4]) + /// y = tf.constant([2, 4]) + /// tf.math.equal(x, y) ==> array([True, True]) + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor equal(Tensor x, Tensor y, bool incompatible_shape_error = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Equal", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["incompatible_shape_error"] = incompatible_shape_error } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return equal_eager_fallback(x, y, incompatible_shape_error: incompatible_shape_error, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["incompatible_shape_error"] = incompatible_shape_error; + var _op = tf.OpDefLib._apply_op_helper("Equal", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "incompatible_shape_error", _op._get_attr_bool("incompatible_shape_error") }; + _execute.record_gradient("Equal", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor equal_eager_fallback(Tensor x, Tensor y, bool incompatible_shape_error, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype, "incompatible_shape_error", incompatible_shape_error }; + var _result = _execute.execute("Equal", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Equal", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the [Gauss error function](https://en.wikipedia.org/wiki/Error_function) of `x` element-wise. In statistics, for non-negative values of $x$, the error function has the following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and variance $1/\sqrt{2}$, $erf(x)$ is the probability that $Y$ falls in the range $[−x, x]$. + /// + /// + /// + public static Tensor erf(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Erf", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return erf_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Erf", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Erf", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor erf_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Erf", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Erf", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the complementary error function of `x` element-wise. + /// + /// + /// + public static Tensor erfc(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Erfc", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return erfc_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Erfc", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Erfc", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor erfc_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Erfc", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Erfc", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + public static Tensor erfinv(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Erfinv", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return erfinv_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Erfinv", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Erfinv", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor erfinv_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Erfinv", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Erfinv", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the euclidean norm of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor euclidean_norm(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EuclideanNorm", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return euclidean_norm_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("EuclideanNorm", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("EuclideanNorm", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor euclidean_norm_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "T", input.dtype, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("EuclideanNorm", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("EuclideanNorm", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes exponential of x element-wise. \\(y = e^x\\). + /// + /// + /// + /// This function computes the exponential of every element in the input tensor. + /// i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. + /// `e` denotes Euler's number and is approximately equal to 2.718281. + /// Output is positive for any real input. + /// + /// ```python + /// x = tf.constant(2.0) + /// tf.math.exp(x) ==> 7.389056 + /// + /// x = tf.constant([2.0, 8.0]) + /// tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) + /// ``` + /// + /// For complex numbers, the exponential value is calculated as follows: + /// + /// ``` + /// e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) + /// ``` + /// + /// Let's consider complex number 1+1j as an example. + /// e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) + /// + /// ```python + /// x = tf.constant(1 + 1j) + /// tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j + /// ``` + /// + /// + /// + /// + public static Tensor exp(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Exp", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return exp_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Exp", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Exp", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor exp_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Exp", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Exp", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes `exp(x) - 1` element-wise. + /// + /// + /// + /// i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor. + /// `e` denotes Euler's number and is approximately equal to 2.718281. + /// + /// ```python + /// x = tf.constant(2.0) + /// tf.math.expm1(x) ==> 6.389056 + /// + /// x = tf.constant([2.0, 8.0]) + /// tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32) + /// + /// x = tf.constant(1 + 1j) + /// tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) + /// ``` + /// + /// + /// + /// + public static Tensor expm1(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Expm1", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return expm1_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Expm1", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Expm1", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor expm1_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Expm1", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Expm1", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns element-wise largest integer not greater than x. + /// + /// + /// + public static Tensor floor(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Floor", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return floor_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Floor", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Floor", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor floor_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Floor", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Floor", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x // y element-wise. + /// + /// + /// + /// *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor floor_div(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FloorDiv", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return floor_div_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("FloorDiv", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("FloorDiv", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor floor_div_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("FloorDiv", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FloorDiv", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns element-wise remainder of division. + /// + /// + /// + /// This follows Python semantics in that the + /// result here is consistent with a flooring divide. E.g. + /// `floor(x / y) * y + floormod(x, y) = x`, regardless of the signs of x and y. + /// + /// *NOTE*: `FloorMod` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor floor_mod(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FloorMod", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return floor_mod_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("FloorMod", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("FloorMod", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor floor_mod_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("FloorMod", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FloorMod", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of (x > y) element-wise. + /// + /// + /// + /// *NOTE*: `Greater` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5, 2, 5]) + /// tf.math.greater(x, y) ==> [False, True, True] + /// + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5]) + /// tf.math.greater(x, y) ==> [False, False, True] + /// ``` + /// + /// + /// + /// + /// + public static Tensor greater(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Greater", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return greater_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Greater", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Greater", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor greater_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Greater", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Greater", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of (x >= y) element-wise. + /// + /// + /// + /// *NOTE*: `GreaterEqual` supports broadcasting. 
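+ /// A C# sketch of a broadcast comparison routed through this generated wrapper
+ /// (hedged: it assumes the host class is the binding's `gen_math_ops`, which is
+ /// not shown in this hunk, and an eager context; variable names are illustrative):
+ /// ```csharp
+ /// var x = tf.constant(new[] { 5, 4, 6, 7 });
+ /// var y = tf.constant(new[] { 5 });          // broadcast against x
+ /// var z = gen_math_ops.greater_equal(x, y);  // ==> [True, False, True, True]
+ /// ```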
More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5, 4, 6, 7]) + /// y = tf.constant([5, 2, 5, 10]) + /// tf.math.greater_equal(x, y) ==> [True, True, True, False] + /// + /// x = tf.constant([5, 4, 6, 7]) + /// y = tf.constant([5]) + /// tf.math.greater_equal(x, y) ==> [True, False, True, True] + /// ``` + /// + /// + /// + /// + /// + public static Tensor greater_equal(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GreaterEqual", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return greater_equal_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("GreaterEqual", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("GreaterEqual", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor greater_equal_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("GreaterEqual", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("GreaterEqual", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Return histogram of values. + /// + /// + /// + /// Given the tensor `values`, this operation returns a rank 1 histogram counting + /// the number of entries in `values` that fall into every bin. The bins are + /// equal width and determined by the arguments `value_range` and `nbins`. + /// + /// ```python + /// # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + /// nbins = 5 + /// value_range = [0.0, 5.0] + /// new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + /// + /// with tf.get_default_session() as sess: + /// hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) + /// variables.global_variables_initializer().run() + /// sess.run(hist) => [2, 1, 1, 0, 2] + /// ``` + /// + /// + /// + /// + /// + /// + /// + public static Tensor histogram_fixed_width(Tensor values, Tensor value_range, Tensor nbins, TF_DataType dtype = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "HistogramFixedWidth", name) { args = new object[] { values, value_range, nbins }, attrs = new Dictionary() { ["dtype"] = dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return histogram_fixed_width_eager_fallback(values, value_range, nbins, dtype: dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["values"] = values; + keywords["value_range"] = value_range; + keywords["nbins"] = nbins; + keywords["dtype"] = dtype; + var _op = tf.OpDefLib._apply_op_helper("HistogramFixedWidth", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "dtype", _op._get_attr_type("dtype") }; + _execute.record_gradient("HistogramFixedWidth", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor histogram_fixed_width_eager_fallback(Tensor values, Tensor value_range, Tensor nbins, TF_DataType dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { values, value_range, nbins }; + object[] _attrs = new object[] { "T", values.dtype, "dtype", dtype }; + var _result = _execute.execute("HistogramFixedWidth", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("HistogramFixedWidth", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the lower regularized incomplete Gamma function `P(a, x)`. + /// + /// + /// + /// The lower regularized incomplete Gamma function is defined as: + /// + /// + /// \(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\) + /// + /// where + /// + /// \(gamma(a, x) = \int_{0}^{x} t^{a-1} exp(-t) dt\) + /// + /// is the lower incomplete Gamma function. + /// + /// Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete + /// Gamma function. + /// + /// + /// + /// + /// + public static Tensor igamma(Tensor a, Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Igamma", name) { args = new object[] { a, x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return igamma_eager_fallback(a, x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Igamma", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Igamma", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor igamma_eager_fallback(Tensor a, Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, x }; + object[] _attrs = new object[] { "T", a.dtype }; + var _result = _execute.execute("Igamma", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Igamma", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient of `igamma(a, x)` wrt `a`. 
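+ ///
+ /// A hedged C# usage sketch (it assumes these methods are emitted into the
+ /// binding's `gen_math_ops` class and that `a` and `x` are float tensors;
+ /// neither assumption is shown in this hunk):
+ /// ```csharp
+ /// var a = tf.constant(new[] { 0.5f, 1.5f, 2.5f });
+ /// var x = tf.constant(new[] { 1.0f, 2.0f, 3.0f });
+ /// var grad = gen_math_ops.igamma_grad_a(a, x);  // element-wise d/da of igamma(a, x)
+ /// ```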
+ /// + /// + /// + /// + public static Tensor igamma_grad_a(Tensor a, Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IgammaGradA", name) { args = new object[] { a, x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return igamma_grad_a_eager_fallback(a, x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("IgammaGradA", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("IgammaGradA", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor igamma_grad_a_eager_fallback(Tensor a, Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, x }; + object[] _attrs = new object[] { "T", a.dtype }; + var _result = _execute.execute("IgammaGradA", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IgammaGradA", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the upper regularized incomplete Gamma function `Q(a, x)`. + /// + /// + /// + /// The upper regularized incomplete Gamma function is defined as: + /// + /// \(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\) + /// + /// where + /// + /// \(Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt\) + /// + /// is the upper incomplete Gamma function. + /// + /// Note, above `P(a, x)` (`Igamma`) is the lower regularized complete + /// Gamma function. + /// + /// + /// + /// + /// + public static Tensor igammac(Tensor a, Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Igammac", name) { args = new object[] { a, x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return igammac_eager_fallback(a, x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Igammac", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Igammac", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor igammac_eager_fallback(Tensor a, Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, x }; + object[] _attrs = new object[] { "T", a.dtype }; + var _result = _execute.execute("Igammac", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Igammac", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the imaginary part of a complex number. + /// + /// + /// + /// Given a tensor `input` of complex numbers, this operation returns a tensor of + /// type `float` that is the imaginary part of each element in `input`. 
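+ /// A C# sketch of the call through this wrapper (hedged: `gen_math_ops` as the
+ /// host class and the tensor `input` are assumptions; `Tout` mirrors the
+ /// parameter declared below):
+ /// ```csharp
+ /// // `input` is assumed to already be a complex64 tensor
+ /// Tensor im = gen_math_ops.imag(input, Tout: TF_DataType.TF_FLOAT);
+ /// ```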
All + /// elements in `input` must be complex numbers of the form \(a + bj\), where *a* + /// is the real part and *b* is the imaginary part returned by this operation. + /// + /// For example: + /// + /// ``` + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.imag(input) ==> [4.75, 5.75] + /// ``` + /// + /// + /// + /// + /// + public static Tensor imag(Tensor input, TF_DataType Tout = TF_DataType.TF_FLOAT, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Imag", name) { args = new object[] { input }, attrs = new Dictionary() { ["Tout"] = Tout } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return imag_eager_fallback(input, Tout: Tout, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["Tout"] = Tout; + var _op = tf.OpDefLib._apply_op_helper("Imag", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tout", _op._get_attr_type("Tout") }; + _execute.record_gradient("Imag", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor imag_eager_fallback(Tensor input, TF_DataType Tout, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "Tout", Tout }; + var _result = _execute.execute("Imag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Imag", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the reciprocal of x element-wise. + /// + /// + /// + /// I.e., \(y = 1 / x\). + /// + /// + /// + /// + public static Tensor inv(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Inv", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return inv_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Inv", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Inv", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor inv_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Inv", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Inv", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient for the inverse of `x` wrt its input. + /// + /// + /// + /// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` + /// is the corresponding input gradient. + /// + /// + /// + /// + /// + public static Tensor inv_grad(Tensor y, Tensor dy, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InvGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return inv_grad_eager_fallback(y, dy, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["dy"] = dy; + var _op = tf.OpDefLib._apply_op_helper("InvGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("InvGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor inv_grad_eager_fallback(Tensor y, Tensor dy, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, dy }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("InvGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InvGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns which elements of x are finite. + /// + /// + /// + /// @compatibility(numpy) + /// Equivalent to np.isfinite + /// @end_compatibility + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) + /// tf.math.is_finite(x) ==> [True, True, True, False, False] + /// ``` + /// + /// + /// + /// + public static Tensor is_finite(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsFinite", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return is_finite_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("IsFinite", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("IsFinite", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor is_finite_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("IsFinite", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IsFinite", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns which elements of x are Inf. + /// + /// + /// + /// @compatibility(numpy) + /// Equivalent to np.isinf + /// @end_compatibility + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5.0, np.inf, 6.8, np.inf]) + /// tf.math.is_inf(x) ==> [False, True, False, True] + /// ``` + /// + /// + /// + /// + public static Tensor is_inf(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsInf", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return is_inf_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("IsInf", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("IsInf", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor is_inf_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("IsInf", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IsInf", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns which elements of x are NaN. + /// + /// + /// + /// @compatibility(numpy) + /// Equivalent to np.isnan + /// @end_compatibility + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) + /// tf.math.is_nan(x) ==> [False, True, False, True, False] + /// ``` + /// + /// + /// + /// + public static Tensor is_nan(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsNan", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return is_nan_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("IsNan", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("IsNan", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor is_nan_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("IsNan", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IsNan", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of (x < y) element-wise. + /// + /// + /// + /// *NOTE*: `Less` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5]) + /// tf.math.less(x, y) ==> [False, True, False] + /// + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5, 6, 7]) + /// tf.math.less(x, y) ==> [False, True, True] + /// ``` + /// + /// + /// + /// + /// + public static Tensor less(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Less", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return less_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Less", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Less", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor less_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Less", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Less", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of (x <= y) element-wise. + /// + /// + /// + /// *NOTE*: `LessEqual` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5]) + /// tf.math.less_equal(x, y) ==> [True, True, False] + /// + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5, 6, 6]) + /// tf.math.less_equal(x, y) ==> [True, True, True] + /// ``` + /// + /// + /// + /// + /// + public static Tensor less_equal(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LessEqual", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return less_equal_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("LessEqual", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("LessEqual", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor less_equal_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("LessEqual", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LessEqual", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the log of the absolute value of `Gamma(x)` element-wise. + /// + /// + /// + /// For positive numbers, this function computes log((input - 1)!) for every element in the tensor. + /// `lgamma(5) = log((5-1)!) = log(4!) 
= log(24) = 3.1780539` + /// + /// Example: + /// + /// ```python + /// x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) + /// tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] + /// ``` + /// + /// + /// + /// + public static Tensor lgamma(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Lgamma", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return lgamma_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Lgamma", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Lgamma", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor lgamma_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Lgamma", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Lgamma", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Generates values in an interval. + /// + /// + /// + /// A sequence of `num` evenly-spaced values are generated beginning at `start`. + /// If `num > 1`, the values in the sequence increase by `stop - start / num - 1`, + /// so that the last one is exactly `stop`. + /// + /// For example: + /// + /// ``` + /// tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor lin_space(Tensor start, Tensor stop, Tensor num, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LinSpace", name) { args = new object[] { start, stop, num }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return lin_space_eager_fallback(start, stop, num, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["start"] = start; + keywords["stop"] = stop; + keywords["num"] = num; + var _op = tf.OpDefLib._apply_op_helper("LinSpace", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("LinSpace", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor lin_space_eager_fallback(Tensor start, Tensor stop, Tensor num, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { start, stop, num }; + object[] _attrs = new object[] { "T", start.dtype, "Tidx", num.dtype }; + var _result = _execute.execute("LinSpace", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LinSpace", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes natural logarithm of x element-wise. + /// + /// + /// + /// I.e., \(y = log_e x\). 
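+ ///
+ /// A C# sketch of the same computation through this wrapper, ahead of the
+ /// Python example below (hedged: `gen_math_ops` as the host class is an
+ /// assumption, not shown in this hunk):
+ /// ```csharp
+ /// var x = tf.constant(new[] { 0f, 0.5f, 1f, 5f });
+ /// var y = gen_math_ops.log(x);  // ==> [-inf, -0.6931472, 0, 1.609438]
+ /// ```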
+ /// + /// Example: + /// + /// ```python + /// x = tf.constant([0, 0.5, 1, 5]) + /// tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] + /// ``` + /// + /// + /// + /// + public static Tensor log(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Log", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return log_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Log", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Log", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor log_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Log", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Log", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes natural logarithm of (1 + x) element-wise. + /// + /// + /// + /// I.e., \(y = log_e (1 + x)\). + /// + /// Example: + /// + /// ```python + /// x = tf.constant([0, 0.5, 1, 5]) + /// tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] + /// ``` + /// + /// + /// + /// + public static Tensor log1p(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Log1p", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return log1p_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Log1p", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Log1p", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor log1p_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Log1p", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Log1p", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of x AND y element-wise. + /// + /// + /// + /// *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor logical_and(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogicalAnd", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return logical_and_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("LogicalAnd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("LogicalAnd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor logical_and_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("LogicalAnd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LogicalAnd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of `NOT x` element-wise. + /// + /// + /// + public static Tensor logical_not(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogicalNot", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return logical_not_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("LogicalNot", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("LogicalNot", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor logical_not_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("LogicalNot", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LogicalNot", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of x OR y element-wise. + /// + /// + /// + /// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor logical_or(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogicalOr", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return logical_or_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("LogicalOr", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("LogicalOr", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor logical_or_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("LogicalOr", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LogicalOr", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Multiply the matrix "a" by the matrix "b". + /// + /// + /// + /// The inputs must be two-dimensional matrices and the inner dimension of + /// "a" (after being transposed if transpose_a is true) must match the + /// outer dimension of "b" (after being transposed if transposed_b is + /// true). + /// + /// *Note*: The default kernel implementation for MatMul on GPUs uses + /// cublas. + /// + /// + /// + /// + /// + /// + /// If true, "a" is transposed before multiplication. + /// + /// + /// + /// + /// If true, "b" is transposed before multiplication. + /// + /// + /// + public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatMul", name) { args = new object[] { a, b }, attrs = new Dictionary() { ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mat_mul_eager_fallback(a, b, transpose_a: transpose_a, transpose_b: transpose_b, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + var _op = tf.OpDefLib._apply_op_helper("MatMul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatMul", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mat_mul_eager_fallback(Tensor a, Tensor b, bool transpose_a, bool transpose_b, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b }; + object[] _attrs = new object[] { "transpose_a", transpose_a, "transpose_b", transpose_b, "T", a.dtype }; + var _result = _execute.execute("MatMul", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatMul", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the maximum of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor max(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Max", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("Max", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Max", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "T", input.dtype, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("Max", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Max", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. + /// + /// + /// + /// *NOTE*: `Maximum` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor maximum(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Maximum", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return maximum_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Maximum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Maximum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor maximum_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Maximum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Maximum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the mean of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. 
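+ /// 
+ /// A small illustrative sketch of the reduction semantics described above. The
+ /// values are assumed, and the Python-level `tf.math.reduce_mean` wrapper is used
+ /// here only for illustration of the underlying `Mean` op:
+ /// 
+ /// ```python
+ /// x = tf.constant([[1., 1.], [2., 2.]])
+ /// tf.math.reduce_mean(x)                    ==> 1.5
+ /// tf.math.reduce_mean(x, 0)                 ==> [1.5, 1.5]
+ /// tf.math.reduce_mean(x, 1, keepdims=True)  ==> [[1.], [2.]]
+ /// ```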
+ /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor mean(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Mean", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mean_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("Mean", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Mean", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mean_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "T", input.dtype, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("Mean", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Mean", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the minimum of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor min(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Min", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return min_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("Min", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Min", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor min_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "T", input.dtype, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("Min", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Min", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the min of x and y (i.e. x < y ? x : y) element-wise. + /// + /// + /// + /// *NOTE*: `Minimum` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor minimum(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Minimum", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return minimum_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Minimum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Minimum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor minimum_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Minimum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Minimum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns element-wise remainder of division. This emulates C semantics in that + /// + /// + /// + /// the result here is consistent with a truncating divide. E.g. + /// `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`. + /// + /// *NOTE*: `Mod` supports broadcasting. 
More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor mod(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Mod", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mod_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Mod", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Mod", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mod_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Mod", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Mod", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x * y element-wise. + /// + /// + /// + /// *NOTE*: `Mul` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor mul(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Mul", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mul_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Mul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Mul", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mul_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Mul", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Mul", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x * y element-wise. Returns zero if y is zero, even if x if infinite or NaN. + /// + /// + /// + /// *NOTE*: `MulNoNan` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor mul_no_nan(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MulNoNan", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mul_no_nan_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("MulNoNan", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MulNoNan", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mul_no_nan_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("MulNoNan", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MulNoNan", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + public static Tensor ndtri(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Ndtri", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return ndtri_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Ndtri", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Ndtri", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor ndtri_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Ndtri", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Ndtri", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes numerical negative value element-wise. + /// + /// + /// + /// I.e., \(y = -x\). + /// + /// + /// + /// + public static Tensor neg(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Neg", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return neg_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Neg", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Neg", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor neg_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Neg", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Neg", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the next representable value of `x1` in the direction of `x2`, element-wise. + /// + /// + /// + /// This operation returns the same result as the C++ std::nextafter function. + /// + /// It can also return a subnormal number. + /// + /// @compatibility(cpp) + /// Equivalent to C++ std::nextafter function. + /// @end_compatibility + /// + /// + /// + /// + /// + public static Tensor next_after(Tensor x1, Tensor x2, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "NextAfter", name) { args = new object[] { x1, x2 }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return next_after_eager_fallback(x1, x2, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x1"] = x1; + keywords["x2"] = x2; + var _op = tf.OpDefLib._apply_op_helper("NextAfter", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("NextAfter", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor next_after_eager_fallback(Tensor x1, Tensor x2, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x1, x2 }; + object[] _attrs = new object[] { "T", x1.dtype }; + var _result = _execute.execute("NextAfter", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("NextAfter", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of (x != y) element-wise. + /// + /// + /// + /// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + /// + public static Tensor not_equal(Tensor x, Tensor y, bool incompatible_shape_error = true, string? 
name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "NotEqual", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["incompatible_shape_error"] = incompatible_shape_error } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return not_equal_eager_fallback(x, y, incompatible_shape_error: incompatible_shape_error, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary keywords = new();
+ keywords["x"] = x;
+ keywords["y"] = y;
+ keywords["incompatible_shape_error"] = incompatible_shape_error;
+ var _op = tf.OpDefLib._apply_op_helper("NotEqual", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "incompatible_shape_error", _op._get_attr_bool("incompatible_shape_error") };
+ _execute.record_gradient("NotEqual", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+ 
+ public static Tensor not_equal_eager_fallback(Tensor x, Tensor y, bool incompatible_shape_error, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x, y };
+ object[] _attrs = new object[] { "T", x.dtype, "incompatible_shape_error", incompatible_shape_error };
+ var _result = _execute.execute("NotEqual", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("NotEqual", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ /// 
+ /// Compute the polygamma function \\(\psi^{(n)}(x)\\).
+ /// 
+ /// 
+ /// 
+ /// The polygamma function is defined as:
+ /// 
+ /// 
+ /// \(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\)
+ /// 
+ /// where \(\psi(x)\) is the digamma function.
+ /// The polygamma function is defined only for non-negative integer orders \a\.
+ /// 
+ /// 
+ /// 
+ /// 
+ public static Tensor polygamma(Tensor a, Tensor x, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Polygamma", name) { args = new object[] { a, x }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return polygamma_eager_fallback(a, x, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary keywords = new();
+ keywords["a"] = a;
+ keywords["x"] = x;
+ var _op = tf.OpDefLib._apply_op_helper("Polygamma", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("Polygamma", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+ 
+ public static Tensor polygamma_eager_fallback(Tensor a, Tensor x, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { a, x };
+ object[] _attrs = new object[] { "T", a.dtype };
+ var _result = _execute.execute("Polygamma", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Polygamma", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ /// 
+ /// Computes the power of one value to another.
+ /// 
+ /// 
+ /// 
+ /// Given a tensor `x` and a tensor `y`, this operation computes \(x^y\) for
+ /// corresponding elements in `x` and `y`. 
For example: + /// + /// ``` + /// # tensor 'x' is [[2, 2]], [3, 3]] + /// # tensor 'y' is [[8, 16], [2, 3]] + /// tf.pow(x, y) ==> [[256, 65536], [9, 27]] + /// ``` + /// + /// + /// + /// + /// + public static Tensor pow(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pow", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return pow_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Pow", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Pow", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor pow_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Pow", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Pow", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the product of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor prod(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Prod", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return prod_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("Prod", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Prod", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor prod_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "T", input.dtype, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("Prod", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Prod", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Convert the quantized 'input' tensor into a lower-precision 'output', using the + /// + /// + /// + /// actual distribution of the values to maximize the usage of the lower bit depth + /// and adjusting the output min and max ranges accordingly. + /// + /// [input_min, input_max] are scalar floats that specify the range for the float + /// interpretation of the 'input' data. For example, if input_min is -1.0f and + /// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 + /// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + /// + /// This operator tries to squeeze as much precision as possible into an output with + /// a lower bit depth by calculating the actual min and max values found in the + /// data. For example, maybe that quint16 input has no values lower than 16,384 and + /// none higher than 49,152. That means only half the range is actually needed, all + /// the float interpretations are between -0.5f and 0.5f, so if we want to compress + /// the data into a quint8 output, we can use that range rather than the theoretical + /// -1.0f to 1.0f that is suggested by the input min and max. + /// + /// In practice, this is most useful for taking output from operations like + /// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and + /// may have large potential output ranges, but in practice have a distribution of + /// input values that only uses a small fraction of the possible range. By feeding + /// that output into this operator, we can reduce it from 32 bits down to 8 with + /// minimal loss of accuracy. + /// + /// + /// + /// + /// + /// + /// + /// The type of the output. Should be a lower bit depth than Tinput. + /// + /// + /// + public static Tensor[] quantize_down_and_shrink_range(Tensor input, Tensor input_min, Tensor input_max, TF_DataType out_type, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeDownAndShrinkRange", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantize_down_and_shrink_range_eager_fallback(input, input_min, input_max, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("QuantizeDownAndShrinkRange", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("QuantizeDownAndShrinkRange", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantize_down_and_shrink_range_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max }; + object[] _attrs = new object[] { "Tinput", input.dtype, "out_type", out_type }; + var _result = _execute.execute("QuantizeDownAndShrinkRange", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizeDownAndShrinkRange", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Returns x + y element-wise, working on quantized buffers. + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_add(Tensor x, Tensor y, Tensor min_x, Tensor max_x, Tensor min_y, Tensor max_y, TF_DataType Toutput = TF_DataType.TF_QINT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedAdd", name) { args = new object[] { x, y, min_x, max_x, min_y, max_y }, attrs = new Dictionary() { ["Toutput"] = Toutput } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_add_eager_fallback(x, y, min_x, max_x, min_y, max_y, Toutput: Toutput, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["min_x"] = min_x; + keywords["max_x"] = max_x; + keywords["min_y"] = min_y; + keywords["max_y"] = max_y; + keywords["Toutput"] = Toutput; + var _op = tf.OpDefLib._apply_op_helper("QuantizedAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Toutput", _op._get_attr_type("Toutput") }; + _execute.record_gradient("QuantizedAdd", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_add_eager_fallback(Tensor x, Tensor y, Tensor min_x, Tensor max_x, Tensor min_y, Tensor max_y, TF_DataType Toutput, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y, min_x, max_x, min_y, max_y }; + object[] _attrs = new object[] { "T1", x.dtype, "T2", y.dtype, "Toutput", Toutput }; + var _result = _execute.execute("QuantizedAdd", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedAdd", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Perform a quantized matrix multiplication of `a` by the matrix `b`. + /// + /// + /// + /// The inputs must be two-dimensional matrices and the inner dimension of + /// `a` (after being transposed if `transpose_a` is non-zero) must match the + /// outer dimension of `b` (after being transposed if `transposed_b` is + /// non-zero). + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If true, `a` is transposed before multiplication. + /// + /// + /// + /// + /// If true, `b` is transposed before multiplication. + /// + /// + /// + /// + /// The type of output produced by activation function + /// following this operation. + /// + /// + /// + public static Tensor[] quantized_mat_mul(Tensor a, Tensor b, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType Toutput = TF_DataType.TF_QINT32, bool transpose_a = false, bool transpose_b = false, TF_DataType Tactivation = TF_DataType.TF_QUINT8, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMul", name) { args = new object[] { a, b, min_a, max_a, min_b, max_b }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["Tactivation"] = Tactivation } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_mat_mul_eager_fallback(a, b, min_a, max_a, min_b, max_b, Toutput: Toutput, transpose_a: transpose_a, transpose_b: transpose_b, Tactivation: Tactivation, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["min_a"] = min_a; + keywords["max_a"] = max_a; + keywords["min_b"] = min_b; + keywords["max_b"] = max_b; + keywords["Toutput"] = Toutput; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["Tactivation"] = Tactivation; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMatMul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Toutput", _op._get_attr_type("Toutput"), "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "Tactivation", _op._get_attr_type("Tactivation") }; + _execute.record_gradient("QuantizedMatMul", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_mat_mul_eager_fallback(Tensor a, Tensor b, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType Toutput, bool transpose_a, bool transpose_b, TF_DataType Tactivation, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b, min_a, max_a, min_b, max_b }; + object[] _attrs = new object[] { "T1", a.dtype, "T2", b.dtype, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "Tactivation", Tactivation }; + var _result = _execute.execute("QuantizedMatMul", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMatMul", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Returns x * y element-wise, working on quantized buffers. + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_mul(Tensor x, Tensor y, Tensor min_x, Tensor max_x, Tensor min_y, Tensor max_y, TF_DataType Toutput = TF_DataType.TF_QINT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMul", name) { args = new object[] { x, y, min_x, max_x, min_y, max_y }, attrs = new Dictionary() { ["Toutput"] = Toutput } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_mul_eager_fallback(x, y, min_x, max_x, min_y, max_y, Toutput: Toutput, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["min_x"] = min_x; + keywords["max_x"] = max_x; + keywords["min_y"] = min_y; + keywords["max_y"] = max_y; + keywords["Toutput"] = Toutput; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Toutput", _op._get_attr_type("Toutput") }; + _execute.record_gradient("QuantizedMul", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_mul_eager_fallback(Tensor x, Tensor y, Tensor min_x, Tensor max_x, Tensor min_y, Tensor max_y, TF_DataType Toutput, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y, min_x, max_x, min_y, max_y }; + object[] _attrs = new object[] { "T1", x.dtype, "T2", y.dtype, "Toutput", Toutput }; + var _result = _execute.execute("QuantizedMul", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMul", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Counts the number of occurrences of each value in an integer array. + /// + /// + /// + /// Outputs a vector with length `size` and the same dtype as `weights`. If + /// `weights` are empty, then index `i` stores the number of times the value `i` is + /// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + /// the value in `weights` at each index where the corresponding value in `arr` is + /// `i`. + /// + /// Values in `arr` outside of the range [0, size) are ignored. + /// + /// + /// + /// + /// + /// + /// + /// + /// bool; Whether the kernel should count the appearance or number of occurrences. + /// + /// + /// + public static Tensor ragged_bincount(Tensor splits, Tensor values, Tensor size, Tensor weights, bool binary_output = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RaggedBincount", name) { args = new object[] { splits, values, size, weights }, attrs = new Dictionary() { ["binary_output"] = binary_output } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return ragged_bincount_eager_fallback(splits, values, size, weights, binary_output: binary_output, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["splits"] = splits; + keywords["values"] = values; + keywords["size"] = size; + keywords["weights"] = weights; + keywords["binary_output"] = binary_output; + var _op = tf.OpDefLib._apply_op_helper("RaggedBincount", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tidx", _op._get_attr_type("Tidx"), "T", _op._get_attr_type("T"), "binary_output", _op._get_attr_bool("binary_output") }; + _execute.record_gradient("RaggedBincount", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor ragged_bincount_eager_fallback(Tensor splits, Tensor values, Tensor size, Tensor weights, bool binary_output, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { splits, values, size, weights }; + object[] _attrs = new object[] { "Tidx", values.dtype, "T", weights.dtype, "binary_output", binary_output }; + var _result = _execute.execute("RaggedBincount", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RaggedBincount", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Creates a sequence of numbers. + /// + /// + /// + /// This operation creates a sequence of numbers that begins at `start` and + /// extends by increments of `delta` up to but not including `limit`. + /// + /// For example: + /// + /// ``` + /// # 'start' is 3 + /// # 'limit' is 18 + /// # 'delta' is 3 + /// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor range(Tensor start, Tensor limit, Tensor delta, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Range", name) { args = new object[] { start, limit, delta }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return range_eager_fallback(start, limit, delta, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["start"] = start; + keywords["limit"] = limit; + keywords["delta"] = delta; + var _op = tf.OpDefLib._apply_op_helper("Range", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Range", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor range_eager_fallback(Tensor start, Tensor limit, Tensor delta, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { start, limit, delta }; + object[] _attrs = new object[] { "Tidx", start.dtype }; + var _result = _execute.execute("Range", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Range", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the real part of a complex number. + /// + /// + /// + /// Given a tensor `input` of complex numbers, this operation returns a tensor of + /// type `float` that is the real part of each element in `input`. All elements in + /// `input` must be complex numbers of the form \(a + bj\), where *a* is the real + /// part returned by this operation and *b* is the imaginary part. + /// + /// For example: + /// + /// ``` + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.real(input) ==> [-2.25, 3.25] + /// ``` + /// + /// + /// + /// + /// + public static Tensor real(Tensor input, TF_DataType Tout = TF_DataType.TF_FLOAT, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Real", name) { args = new object[] { input }, attrs = new Dictionary() { ["Tout"] = Tout } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return real_eager_fallback(input, Tout: Tout, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["Tout"] = Tout; + var _op = tf.OpDefLib._apply_op_helper("Real", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tout", _op._get_attr_type("Tout") }; + _execute.record_gradient("Real", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor real_eager_fallback(Tensor input, TF_DataType Tout, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "Tout", Tout }; + var _result = _execute.execute("Real", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Real", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x / y element-wise for real types. + /// + /// + /// + /// If `x` and `y` are reals, this will return the floating-point division. 
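+ /// 
+ /// A small illustrative sketch (values assumed; the Python-level `tf.realdiv`
+ /// wrapper is used here only for illustration of the underlying `RealDiv` op):
+ /// 
+ /// ```python
+ /// x = tf.constant([4.0, 3.0])
+ /// y = tf.constant([2.0, 2.0])
+ /// tf.realdiv(x, y) ==> [2.0, 1.5]
+ /// ```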
+ /// + /// *NOTE*: `Div` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor real_div(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RealDiv", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return real_div_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("RealDiv", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("RealDiv", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor real_div_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("RealDiv", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RealDiv", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the reciprocal of x element-wise. + /// + /// + /// + /// I.e., \(y = 1 / x\). + /// + /// + /// + /// + public static Tensor reciprocal(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reciprocal", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reciprocal_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Reciprocal", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Reciprocal", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reciprocal_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Reciprocal", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Reciprocal", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient for the inverse of `x` wrt its input. + /// + /// + /// + /// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` + /// is the corresponding input gradient. + /// + /// + /// + /// + /// + public static Tensor reciprocal_grad(Tensor y, Tensor dy, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReciprocalGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reciprocal_grad_eager_fallback(y, dy, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["dy"] = dy; + var _op = tf.OpDefLib._apply_op_helper("ReciprocalGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("ReciprocalGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reciprocal_grad_eager_fallback(Tensor y, Tensor dy, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, dy }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("ReciprocalGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReciprocalGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes a range that covers the actual values present in a quantized tensor. + /// + /// + /// + /// Given a quantized tensor described by `(input, input_min, input_max)`, outputs a + /// range that covers the actual values present in that tensor. This op is typically + /// used to produce the `requested_output_min` and `requested_output_max` for + /// `Requantize`. + /// + /// + /// + /// + /// + /// + public static Tensor[] requantization_range(Tensor input, Tensor input_min, Tensor input_max, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RequantizationRange", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return requantization_range_eager_fallback(input, input_min, input_max, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + var _op = tf.OpDefLib._apply_op_helper("RequantizationRange", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput") }; + _execute.record_gradient("RequantizationRange", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] requantization_range_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max }; + object[] _attrs = new object[] { "Tinput", input.dtype }; + var _result = _execute.execute("RequantizationRange", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RequantizationRange", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes requantization range per channel. + /// + /// + /// + /// + /// + /// + /// The maximum value of the output that needs to be clipped. + /// Example: set this to 6 for Relu6. 
+ /// + /// + /// + public static Tensor[] requantization_range_per_channel(Tensor input, Tensor input_min, Tensor input_max, float clip_value_max, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RequantizationRangePerChannel", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { ["clip_value_max"] = clip_value_max } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return requantization_range_per_channel_eager_fallback(input, input_min, input_max, clip_value_max: clip_value_max, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["clip_value_max"] = clip_value_max; + var _op = tf.OpDefLib._apply_op_helper("RequantizationRangePerChannel", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "clip_value_max", _op.get_attr("clip_value_max") }; + _execute.record_gradient("RequantizationRangePerChannel", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] requantization_range_per_channel_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, float clip_value_max, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max }; + object[] _attrs = new object[] { "T", input.dtype, "clip_value_max", clip_value_max }; + var _result = _execute.execute("RequantizationRangePerChannel", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RequantizationRangePerChannel", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Converts the quantized `input` tensor into a lower-precision `output`. + /// + /// + /// + /// Converts the quantized `input` tensor into a lower-precision `output`, using the + /// output range specified with `requested_output_min` and `requested_output_max`. + /// + /// `[input_min, input_max]` are scalar floats that specify the range for the float + /// interpretation of the `input` data. For example, if `input_min` is -1.0f and + /// `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 + /// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of the output. Should be a lower bit depth than Tinput. + /// + /// + /// + public static Tensor[] requantize(Tensor input, Tensor input_min, Tensor input_max, Tensor requested_output_min, Tensor requested_output_max, TF_DataType out_type, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Requantize", name) { args = new object[] { input, input_min, input_max, requested_output_min, requested_output_max }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return requantize_eager_fallback(input, input_min, input_max, requested_output_min, requested_output_max, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["requested_output_min"] = requested_output_min; + keywords["requested_output_max"] = requested_output_max; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("Requantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("Requantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] requantize_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, Tensor requested_output_min, Tensor requested_output_max, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max, requested_output_min, requested_output_max }; + object[] _attrs = new object[] { "Tinput", input.dtype, "out_type", out_type }; + var _result = _execute.execute("Requantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Requantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Requantizes input with min and max values known per channel. + /// + /// + /// + /// + /// + /// + /// + /// + /// The quantized type of output tensor that needs to be converted. + /// + /// + /// + public static Tensor[] requantize_per_channel(Tensor input, Tensor input_min, Tensor input_max, Tensor requested_output_min, Tensor requested_output_max, TF_DataType out_type = TF_DataType.TF_QUINT8, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RequantizePerChannel", name) { args = new object[] { input, input_min, input_max, requested_output_min, requested_output_max }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return requantize_per_channel_eager_fallback(input, input_min, input_max, requested_output_min, requested_output_max, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["requested_output_min"] = requested_output_min; + keywords["requested_output_max"] = requested_output_max; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("RequantizePerChannel", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("RequantizePerChannel", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] requantize_per_channel_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, Tensor requested_output_min, Tensor requested_output_max, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max, requested_output_min, requested_output_max }; + object[] _attrs = new object[] { "T", input.dtype, "out_type", out_type }; + var _result = _execute.execute("RequantizePerChannel", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RequantizePerChannel", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Returns element-wise integer closest to x. + /// + /// + /// + /// If the result is midway between two representable values, + /// the even representable is chosen. + /// For example: + /// + /// ``` + /// rint(-1.5) ==> -2.0 + /// rint(0.5000001) ==> 1.0 + /// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] + /// ``` + /// + /// + /// + /// + public static Tensor rint(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rint", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return rint_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Rint", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Rint", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor rint_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Rint", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Rint", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Rounds the values of a tensor to the nearest integer, element-wise. + /// + /// + /// + /// Rounds half to even. Also known as bankers rounding. If you want to round + /// according to the current system rounding mode use std::cint. + /// + /// + /// + /// + public static Tensor round(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Round", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return round_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Round", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Round", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor round_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Round", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Round", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes reciprocal of square root of x element-wise. + /// + /// + /// + /// I.e., \(y = 1 / sqrt{x}\). + /// + /// + /// + /// + public static Tensor rsqrt(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rsqrt", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return rsqrt_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Rsqrt", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Rsqrt", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor rsqrt_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Rsqrt", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Rsqrt", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient for the rsqrt of `x` wrt its input. + /// + /// + /// + /// Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy` + /// is the corresponding input gradient. + /// + /// + /// + /// + /// + public static Tensor rsqrt_grad(Tensor y, Tensor dy, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RsqrtGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return rsqrt_grad_eager_fallback(y, dy, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["dy"] = dy; + var _op = tf.OpDefLib._apply_op_helper("RsqrtGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("RsqrtGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor rsqrt_grad_eager_fallback(Tensor y, Tensor dy, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, dy }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("RsqrtGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RsqrtGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the maximum along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \(output_i = max_j(data_j)\) where `max` is over `j` such + /// that `segment_ids[j] == i`. + /// + /// If the max is empty for a given segment ID `i`, `output[i] = 0`. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be sorted, + /// and an error is thrown for indices that are not increasing. On GPU, this + /// does not throw an error for unsorted indices. 
On GPU, out-of-order indices + /// result in safe but unspecified behavior, which may include treating + /// out-of-order indices as the same as a smaller following index. + /// + ///
+ /// + ///
+ /// + /// For example: + /// + /// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// >>> tf.math.segment_max(c, tf.constant([0, 0, 1])).numpy() + /// array([[4, 3, 3, 4], + /// [5, 6, 7, 8]], dtype=int32) + /// + /// + ///
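The doctest above translates almost one-to-one to C#. Below is a minimal usage sketch for the `segment_max` wrapper defined just after this comment; the `gen_math_ops` class name, the `tf.constant` overload for multidimensional arrays, and `Tensor.numpy()` are assumptions about the surrounding TensorFlow.NET API rather than anything this patch shows:

```csharp
using System;
using static Tensorflow.Binding;   // assumed entry point exposing `tf`

// Rows 0 and 1 share segment id 0, row 2 is segment 1, so the result has
// one row per segment: the element-wise max of rows 0/1, then row 2 unchanged.
var c = tf.constant(new int[,] { { 1, 2, 3, 4 }, { 4, 3, 2, 1 }, { 5, 6, 7, 8 } });
var ids = tf.constant(new[] { 0, 0, 1 });
var output = gen_math_ops.segment_max(c, ids);   // expected: [[4, 3, 3, 4], [5, 6, 7, 8]]
Console.WriteLine(output.numpy());
```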
+ /// + /// + /// + public static Tensor segment_max(Tensor data, Tensor segment_ids, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentMax", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return segment_max_eager_fallback(data, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SegmentMax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("SegmentMax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor segment_max_eager_fallback(Tensor data, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype }; + var _result = _execute.execute("SegmentMax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SegmentMax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the mean along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \(output_i = \frac{\sum_j data_j}{N}\) where `mean` is + /// over `j` such that `segment_ids[j] == i` and `N` is the total number of + /// values summed. + /// + /// If the mean is empty for a given segment ID `i`, `output[i] = 0`. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be sorted, + /// and an error is thrown for indices that are not increasing. On GPU, this + /// does not throw an error for unsorted indices. On GPU, out-of-order indices + /// result in safe but unspecified behavior, which may include treating + /// out-of-order indices as a smaller following index when computing the numerator + /// of the mean. + /// + ///
+ /// + ///
+ /// + /// For example: + /// + /// >>> c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// >>> tf.math.segment_mean(c, tf.constant([0, 0, 1])).numpy() + /// array([[2.5, 2.5, 2.5, 2.5], + /// [5., 6., 7., 8.]], dtype=float32) + /// + /// + ///
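The `\frac{\sum_j data_j}{N}` formula in the remarks above can also be cross-checked against the neighbouring `segment_sum` wrapper: divide per-segment sums by per-segment counts. A hedged sketch under the same assumptions as the earlier example, plus `tf.ones_like` and the `/` operator overload on `Tensor`:

```csharp
using static Tensorflow.Binding;

var c = tf.constant(new float[,] { { 1f, 2f, 3f, 4f }, { 4f, 3f, 2f, 1f }, { 5f, 6f, 7f, 8f } });
var ids = tf.constant(new[] { 0, 0, 1 });

// Direct call: expected [[2.5, 2.5, 2.5, 2.5], [5, 6, 7, 8]]
var mean = gen_math_ops.segment_mean(c, ids);

// Hand-rolled equivalent: per-segment sum divided by per-segment count N.
var sums = gen_math_ops.segment_sum(c, ids);
var counts = gen_math_ops.segment_sum(tf.ones_like(c), ids);   // N broadcast across each row
var check = sums / counts;                                      // should match `mean`
```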
+ /// + /// + /// + public static Tensor segment_mean(Tensor data, Tensor segment_ids, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentMean", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return segment_mean_eager_fallback(data, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SegmentMean", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("SegmentMean", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor segment_mean_eager_fallback(Tensor data, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype }; + var _result = _execute.execute("SegmentMean", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SegmentMean", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the minimum along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \(output_i = min_j(data_j)\) where `min` is over `j` such + /// that `segment_ids[j] == i`. + /// + /// If the min is empty for a given segment ID `i`, `output[i] = 0`. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be sorted, + /// and an error is thrown for indices that are not increasing. On GPU, this + /// does not throw an error for unsorted indices. On GPU, out-of-order indices + /// result in safe but unspecified behavior, which may include treating + /// out-of-order indices as the same as a smaller following index. + /// + ///
+ /// + ///
+ /// + /// For example: + /// + /// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// >>> tf.math.segment_min(c, tf.constant([0, 0, 1])).numpy() + /// array([[1, 2, 2, 1], + /// [5, 6, 7, 8]], dtype=int32) + /// + /// + ///
+ /// + /// + /// + public static Tensor segment_min(Tensor data, Tensor segment_ids, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentMin", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return segment_min_eager_fallback(data, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SegmentMin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("SegmentMin", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor segment_min_eager_fallback(Tensor data, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype }; + var _result = _execute.execute("SegmentMin", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SegmentMin", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the product along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \(output_i = prod_j data_j\) where the product is over `j` such + /// that `segment_ids[j] == i`. + /// + /// If the product is empty for a given segment ID `i`, `output[i] = 1`. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be sorted, + /// and an error is thrown for indices that are not increasing. On GPU, this + /// does not throw an error for unsorted indices. On GPU, out-of-order indices + /// result in safe but unspecified behavior, which may include treating + /// out-of-order indices as the same as a smaller following index. + /// + ///
+ /// + ///
+ /// + /// For example: + /// + /// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// >>> tf.math.segment_prod(c, tf.constant([0, 0, 1])).numpy() + /// array([[4, 6, 6, 4], + /// [5, 6, 7, 8]], dtype=int32) + /// + /// + ///
+ /// + /// + /// + public static Tensor segment_prod(Tensor data, Tensor segment_ids, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentProd", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return segment_prod_eager_fallback(data, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SegmentProd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("SegmentProd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor segment_prod_eager_fallback(Tensor data, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype }; + var _result = _execute.execute("SegmentProd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SegmentProd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the sum along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \(output_i = sum_j data_j\) where sum is over `j` such + /// that `segment_ids[j] == i`. + /// + /// If the sum is empty for a given segment ID `i`, `output[i] = 0`. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be sorted, + /// and an error is thrown for indices that are not increasing. On GPU, this + /// does not throw an error for unsorted indices. On GPU, out-of-order indices + /// result in safe but unspecified behavior, which may include treating + /// out-of-order indices as the same as a smaller following index. + /// + ///
+ /// + ///
+ /// + /// For example: + /// + /// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// >>> tf.math.segment_sum(c, tf.constant([0, 0, 1])).numpy() + /// array([[5, 5, 5, 5], + /// [5, 6, 7, 8]], dtype=int32) + /// + /// + ///
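Beyond what each op computes, every wrapper emitted by the new generator follows the same three-stage dispatch, which the `segment_sum` body below illustrates. The condensed paraphrase here is only meant to make that template easier to read; it drops the gradient-recording branch, writes the keyword dictionary with explicit generic syntax, and is not additional code from the patch:

```csharp
// Sketch of the template shared by the generated wrappers (SegmentSum as the instance).
Tensor segment_sum_sketch(Tensor data, Tensor segment_ids, string name = null)
{
    var ctx = tf.Context;
    if (ctx.executing_eagerly())
    {
        // 1. Eager fast path: hand the inputs straight to the eager runtime.
        try
        {
            return tf.Runner.TFE_FastPathExecute(
                new FastPathOpExecInfo(ctx, "SegmentSum", name) { args = new object[] { data, segment_ids } })[0];
        }
        catch (Exception) { /* fall through */ }

        // 2. Eager fallback: explicit attrs via _execute.execute (see the *_eager_fallback methods).
        try { return segment_sum_eager_fallback(data, segment_ids, name: name, ctx: ctx); }
        catch (Exception) { /* fall through to graph mode */ }
    }

    // 3. Graph mode: build the node through the op-def library.
    var keywords = new Dictionary<string, object> { ["data"] = data, ["segment_ids"] = segment_ids };
    var op = tf.OpDefLib._apply_op_helper("SegmentSum", name, keywords);
    return op.outputs[0];
}
```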
+ /// + /// + /// + public static Tensor segment_sum(Tensor data, Tensor segment_ids, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentSum", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return segment_sum_eager_fallback(data, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SegmentSum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("SegmentSum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor segment_sum_eager_fallback(Tensor data, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype }; + var _result = _execute.execute("SegmentSum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SegmentSum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Selects elements from `t` or `e`, depending on `condition`. + /// + /// + /// + /// The `t`, and `e` tensors must all have the same shape, and the + /// output will also have that shape. + /// + /// The `condition` tensor must be a scalar if `t` and `e` are scalars. + /// If `t` and `e` are vectors or higher rank, then `condition` must be either a + /// scalar, a vector with size matching the first dimension of `t`, or must have + /// the same shape as `t`. + /// + /// The `condition` tensor acts as a mask that chooses, based on the value at each + /// element, whether the corresponding element / row in the output should be + /// taken from `t` (if true) or `e` (if false). + /// + /// If `condition` is a vector and `t` and `e` are higher rank matrices, then + /// it chooses which row (outer dimension) to copy from `t` and `e`. + /// If `condition` has the same shape as `t` and `e`, then it chooses which + /// element to copy from `t` and `e`. + /// + /// For example: + /// + /// ```python + /// # 'condition' tensor is [[True, False] + /// # [False, True]] + /// # 't' is [[1, 2], + /// # [3, 4]] + /// # 'e' is [[5, 6], + /// # [7, 8]] + /// select(condition, t, e) # => [[1, 6], [7, 4]] + /// + /// + /// # 'condition' tensor is [True, False] + /// # 't' is [[1, 2], + /// # [3, 4]] + /// # 'e' is [[5, 6], + /// # [7, 8]] + /// select(condition, t, e) ==> [[1, 2], + /// [7, 8]] + /// + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor select(Tensor condition, Tensor t, Tensor e, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Select", name) { args = new object[] { condition, t, e }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return select_eager_fallback(condition, t, e, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["condition"] = condition; + keywords["t"] = t; + keywords["e"] = e; + var _op = tf.OpDefLib._apply_op_helper("Select", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Select", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor select_eager_fallback(Tensor condition, Tensor t, Tensor e, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { condition, t, e }; + object[] _attrs = new object[] { "T", t.dtype }; + var _result = _execute.execute("Select", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Select", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor select_v2(Tensor condition, Tensor t, Tensor e, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SelectV2", name) { args = new object[] { condition, t, e }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return select_v2_eager_fallback(condition, t, e, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["condition"] = condition; + keywords["t"] = t; + keywords["e"] = e; + var _op = tf.OpDefLib._apply_op_helper("SelectV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SelectV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor select_v2_eager_fallback(Tensor condition, Tensor t, Tensor e, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { condition, t, e }; + object[] _attrs = new object[] { "T", t.dtype }; + var _result = _execute.execute("SelectV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SelectV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes sigmoid of `x` element-wise. + /// + /// + /// + /// Specifically, `y = 1 / (1 + exp(-x))`. + /// + /// + /// + /// + public static Tensor sigmoid(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sigmoid", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sigmoid_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Sigmoid", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Sigmoid", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sigmoid_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Sigmoid", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sigmoid", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient of the sigmoid of `x` wrt its input. + /// + /// + /// + /// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and + /// `dy` is the corresponding input gradient. + /// + /// + /// + /// + /// + public static Tensor sigmoid_grad(Tensor y, Tensor dy, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SigmoidGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sigmoid_grad_eager_fallback(y, dy, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["dy"] = dy; + var _op = tf.OpDefLib._apply_op_helper("SigmoidGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SigmoidGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sigmoid_grad_eager_fallback(Tensor y, Tensor dy, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, dy }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("SigmoidGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SigmoidGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns an element-wise indication of the sign of a number. + /// + /// + /// + /// `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. + /// + /// For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. + /// + /// Example usage: + /// >>> tf.math.sign([0., 2., -3.]) + /// + /// + /// + /// + /// + public static Tensor sign(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sign", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sign_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Sign", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Sign", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sign_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Sign", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sign", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes sine of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes sine of every + /// element in the tensor. Input range is `(-inf, inf)` and + /// output range is `[-1,1]`. + /// + /// ```python + /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) + /// tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan] + /// ``` + /// + /// + /// + /// + public static Tensor sin(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sin", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sin_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Sin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Sin", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sin_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Sin", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sin", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes hyperbolic sine of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes hyperbolic sine of every + /// element in the tensor. Input range is `[-inf,inf]` and output range + /// is `[-inf,inf]`. + /// + /// ```python + /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + /// tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf] + /// ``` + /// + /// + /// + /// + public static Tensor sinh(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sinh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sinh_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Sinh", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Sinh", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sinh_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Sinh", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sinh", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Generates points from the Sobol sequence. + /// + /// + /// + /// Creates a Sobol sequence with `num_results` samples. Each sample has dimension + /// `dim`. Skips the first `skip` samples. + /// + /// + /// + /// + /// + /// + /// + /// The type of the sample. One of: `float32` or `float64`. + /// + /// + /// + public static Tensor sobol_sample(Tensor dim, Tensor num_results, Tensor skip, TF_DataType dtype = TF_DataType.TF_FLOAT, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SobolSample", name) { args = new object[] { dim, num_results, skip }, attrs = new Dictionary() { ["dtype"] = dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sobol_sample_eager_fallback(dim, num_results, skip, dtype: dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["dim"] = dim; + keywords["num_results"] = num_results; + keywords["skip"] = skip; + keywords["dtype"] = dtype; + var _op = tf.OpDefLib._apply_op_helper("SobolSample", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype") }; + _execute.record_gradient("SobolSample", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sobol_sample_eager_fallback(Tensor dim, Tensor num_results, Tensor skip, TF_DataType dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { dim, num_results, skip }; + object[] _attrs = new object[] { "dtype", dtype }; + var _result = _execute.execute("SobolSample", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SobolSample", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Counts the number of occurrences of each value in an integer array. + /// + /// + /// + /// Outputs a vector with length `size` and the same dtype as `weights`. If + /// `weights` are empty, then index `i` stores the number of times the value `i` is + /// counted in `arr`. 
If `weights` are non-empty, then index `i` stores the sum of + /// the value in `weights` at each index where the corresponding value in `arr` is + /// `i`. + /// + /// Values in `arr` outside of the range [0, size) are ignored. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// bool; Whether the kernel should count the appearance or number of occurrences. + /// + /// + /// + public static Tensor sparse_bincount(Tensor indices, Tensor values, Tensor dense_shape, Tensor size, Tensor weights, bool binary_output = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseBincount", name) { args = new object[] { indices, values, dense_shape, size, weights }, attrs = new Dictionary() { ["binary_output"] = binary_output } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_bincount_eager_fallback(indices, values, dense_shape, size, weights, binary_output: binary_output, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["indices"] = indices; + keywords["values"] = values; + keywords["dense_shape"] = dense_shape; + keywords["size"] = size; + keywords["weights"] = weights; + keywords["binary_output"] = binary_output; + var _op = tf.OpDefLib._apply_op_helper("SparseBincount", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tidx", _op._get_attr_type("Tidx"), "T", _op._get_attr_type("T"), "binary_output", _op._get_attr_bool("binary_output") }; + _execute.record_gradient("SparseBincount", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_bincount_eager_fallback(Tensor indices, Tensor values, Tensor dense_shape, Tensor size, Tensor weights, bool binary_output, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { indices, values, dense_shape, size, weights }; + object[] _attrs = new object[] { "Tidx", values.dtype, "T", weights.dtype, "binary_output", binary_output }; + var _result = _execute.execute("SparseBincount", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseBincount", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Multiply matrix "a" by matrix "b". + /// + /// + /// + /// The inputs must be two-dimensional matrices and the inner dimension of "a" must + /// match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not + /// `SparseTensor`s. This op is optimized for the case where at least one of "a" or + /// "b" is sparse, in the sense that they have a large proportion of zero values. + /// The breakeven for using this versus a dense matrix multiply on one platform was + /// 30% zero values in the sparse matrix. + /// + /// The gradient computation of this operation will only take advantage of sparsity + /// in the input gradient when that gradient comes from a Relu. + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor sparse_mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, bool a_is_sparse = false, bool b_is_sparse = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseMatMul", name) { args = new object[] { a, b }, attrs = new Dictionary() { ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["a_is_sparse"] = a_is_sparse, ["b_is_sparse"] = b_is_sparse } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_mat_mul_eager_fallback(a, b, transpose_a: transpose_a, transpose_b: transpose_b, a_is_sparse: a_is_sparse, b_is_sparse: b_is_sparse, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["a_is_sparse"] = a_is_sparse; + keywords["b_is_sparse"] = b_is_sparse; + var _op = tf.OpDefLib._apply_op_helper("SparseMatMul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "a_is_sparse", _op._get_attr_bool("a_is_sparse"), "b_is_sparse", _op._get_attr_bool("b_is_sparse"), "Ta", _op._get_attr_type("Ta"), "Tb", _op._get_attr_type("Tb") }; + _execute.record_gradient("SparseMatMul", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_mat_mul_eager_fallback(Tensor a, Tensor b, bool transpose_a, bool transpose_b, bool a_is_sparse, bool b_is_sparse, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b }; + object[] _attrs = new object[] { "transpose_a", transpose_a, "transpose_b", transpose_b, "a_is_sparse", a_is_sparse, "b_is_sparse", b_is_sparse, "Ta", a.dtype, "Tb", b.dtype }; + var _result = _execute.execute("SparseMatMul", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseMatMul", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the mean along sparse segments of a tensor. + /// + /// + /// + /// See `tf.sparse.segment_sum` for usage examples. + /// + /// Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first + /// dimension, selecting a subset of dimension 0, specified by `indices`. + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_mean(Tensor data, Tensor indices, Tensor segment_ids, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentMean", name) { args = new object[] { data, indices, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_mean_eager_fallback(data, indices, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentMean", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentMean", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_mean_eager_fallback(Tensor data, Tensor indices, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, indices, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tidx", indices.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentMean", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentMean", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients for SparseSegmentMean. + /// + /// + /// + /// Returns tensor "output" with same shape as grad, except for dimension 0 whose + /// value is output_dim0. + /// + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_mean_grad(Tensor grad, Tensor indices, Tensor segment_ids, Tensor output_dim0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentMeanGrad", name) { args = new object[] { grad, indices, segment_ids, output_dim0 }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_mean_grad_eager_fallback(grad, indices, segment_ids, output_dim0, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["grad"] = grad; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + keywords["output_dim0"] = output_dim0; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentMeanGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentMeanGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_mean_grad_eager_fallback(Tensor grad, Tensor indices, Tensor segment_ids, Tensor output_dim0, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { grad, indices, segment_ids, output_dim0 }; + object[] _attrs = new object[] { "T", grad.dtype, "Tidx", indices.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentMeanGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentMeanGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the mean along sparse segments of a tensor. + /// + /// + /// + /// Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is + /// missing, the `output` tensor at that position will be zeroed. + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_mean_with_num_segments(Tensor data, Tensor indices, Tensor segment_ids, Tensor num_segments, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentMeanWithNumSegments", name) { args = new object[] { data, indices, segment_ids, num_segments }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_mean_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + keywords["num_segments"] = num_segments; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentMeanWithNumSegments", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tnumsegments", _op._get_attr_type("Tnumsegments"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentMeanWithNumSegments", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_mean_with_num_segments_eager_fallback(Tensor data, Tensor indices, Tensor segment_ids, Tensor num_segments, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, indices, segment_ids, num_segments }; + object[] _attrs = new object[] { "T", data.dtype, "Tidx", indices.dtype, "Tnumsegments", num_segments.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentMeanWithNumSegments", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentMeanWithNumSegments", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the sum along sparse segments of a tensor divided by the sqrt of N. + /// + /// + /// + /// N is the size of the segment being reduced. + /// + /// See `tf.sparse.segment_sum` for usage examples. + /// + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_sqrt_n(Tensor data, Tensor indices, Tensor segment_ids, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSqrtN", name) { args = new object[] { data, indices, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_sqrt_n_eager_fallback(data, indices, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentSqrtN", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentSqrtN", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_sqrt_n_eager_fallback(Tensor data, Tensor indices, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, indices, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tidx", indices.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentSqrtN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentSqrtN", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the sum along sparse segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first + /// dimension, selecting a subset of dimension 0, specified by `indices`. + /// + /// For example: + /// + /// ```python + /// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + /// + /// # Select two rows, one segment. + /// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) + /// # => [[0 0 0 0]] + /// + /// # Select two rows, two segment. + /// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) + /// # => [[ 1 2 3 4] + /// # [-1 -2 -3 -4]] + /// + /// # Select all rows, two segments. + /// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) + /// # => [[0 0 0 0] + /// # [5 6 7 8]] + /// + /// # Which is equivalent to: + /// tf.segment_sum(c, tf.constant([0, 0, 1])) + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_sum(Tensor data, Tensor indices, Tensor segment_ids, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSum", name) { args = new object[] { data, indices, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_sum_eager_fallback(data, indices, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentSum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentSum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_sum_eager_fallback(Tensor data, Tensor indices, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, indices, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tidx", indices.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentSum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentSum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients for SparseSegmentSum. + /// + /// + /// + /// Returns tensor "output" with same shape as grad, except for dimension 0 whose + /// value is output_dim0. + /// + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_sum_grad(Tensor grad, Tensor indices, Tensor segment_ids, Tensor output_dim0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSumGrad", name) { args = new object[] { grad, indices, segment_ids, output_dim0 }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_sum_grad_eager_fallback(grad, indices, segment_ids, output_dim0, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["grad"] = grad; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + keywords["output_dim0"] = output_dim0; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentSumGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentSumGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_sum_grad_eager_fallback(Tensor grad, Tensor indices, Tensor segment_ids, Tensor output_dim0, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { grad, indices, segment_ids, output_dim0 }; + object[] _attrs = new object[] { "T", grad.dtype, "Tidx", indices.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentSumGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentSumGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the sum along sparse segments of a tensor. + /// + /// + /// + /// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is + /// missing, the `output` tensor at that position will be zeroed. + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation) + /// for an explanation of segments. + /// + /// For example: + /// + /// ```python + /// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + /// + /// tf.sparse_segment_sum_with_num_segments( + /// c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3) + /// # => [[0 0 0 0] + /// # [0 0 0 0] + /// # [0 0 0 0]] + /// + /// tf.sparse_segment_sum_with_num_segments(c, + /// tf.constant([0, 1]), + /// tf.constant([0, 2], + /// num_segments=4)) + /// # => [[ 1 2 3 4] + /// # [ 0 0 0 0] + /// # [-1 -2 -3 -4] + /// # [ 0 0 0 0]] + /// ``` + /// + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_sum_with_num_segments(Tensor data, Tensor indices, Tensor segment_ids, Tensor num_segments, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSumWithNumSegments", name) { args = new object[] { data, indices, segment_ids, num_segments }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_sum_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + keywords["num_segments"] = num_segments; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentSumWithNumSegments", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tnumsegments", _op._get_attr_type("Tnumsegments"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentSumWithNumSegments", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_sum_with_num_segments_eager_fallback(Tensor data, Tensor indices, Tensor segment_ids, Tensor num_segments, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, indices, segment_ids, num_segments }; + object[] _attrs = new object[] { "T", data.dtype, "Tidx", indices.dtype, "Tnumsegments", num_segments.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentSumWithNumSegments", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentSumWithNumSegments", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes square root of x element-wise. + /// + /// + /// + /// I.e., \(y = sqrt{x} = x^{1/2}\). + /// + /// + /// + /// + public static Tensor sqrt(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sqrt", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sqrt_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Sqrt", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Sqrt", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sqrt_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Sqrt", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sqrt", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient for the sqrt of `x` wrt its input. + /// + /// + /// + /// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy` + /// is the corresponding input gradient. 
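+    /// 
+    /// A minimal usage sketch of `sqrt` and `sqrt_grad` (illustrative only; it assumes
+    /// eager mode and `using static Tensorflow.Binding;`, and the values in the trailing
+    /// comments follow from the formula above rather than from captured output):
+    /// 
+    ///     var x  = tf.constant(new float[] { 4f, 9f });
+    ///     var y  = gen_math_ops.sqrt(x);                  // y = sqrt(x)   -> [2, 3]
+    ///     var dy = tf.constant(new float[] { 1f, 1f });   // incoming gradient
+    ///     var dx = gen_math_ops.sqrt_grad(y, dy);         // dy * 0.5 / y  -> [0.25, 0.1667]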
+ /// + /// + /// + /// + /// + public static Tensor sqrt_grad(Tensor y, Tensor dy, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SqrtGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sqrt_grad_eager_fallback(y, dy, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["dy"] = dy; + var _op = tf.OpDefLib._apply_op_helper("SqrtGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SqrtGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sqrt_grad_eager_fallback(Tensor y, Tensor dy, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, dy }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("SqrtGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SqrtGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes square of x element-wise. + /// + /// + /// + /// I.e., \(y = x * x = x^2\). + /// + /// + /// + /// + public static Tensor square(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Square", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return square_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Square", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Square", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor square_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Square", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Square", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns conj(x - y)(x - y) element-wise. + /// + /// + /// + /// *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor squared_difference(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SquaredDifference", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return squared_difference_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("SquaredDifference", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SquaredDifference", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor squared_difference_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("SquaredDifference", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SquaredDifference", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x - y element-wise. + /// + /// + /// + /// *NOTE*: `Sub` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor sub(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sub", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sub_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Sub", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Sub", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sub_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Sub", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sub", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the sum of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor sum(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sum", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sum_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("Sum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Sum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sum_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "T", input.dtype, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("Sum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes tan of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes tangent of every + /// element in the tensor. Input range is `(-inf, inf)` and + /// output range is `(-inf, inf)`. If input lies outside the boundary, `nan` + /// is returned. + /// + /// ```python + /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) + /// tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan] + /// ``` + /// + /// + /// + /// + public static Tensor tan(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Tan", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tan_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Tan", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Tan", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tan_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Tan", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Tan", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes hyperbolic tangent of `x` element-wise. + /// + /// + /// + /// Given an input tensor, this function computes hyperbolic tangent of every + /// element in the tensor. 
Input range is `[-inf, inf]` and + /// output range is `[-1,1]`. + /// + /// >>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) + /// >>> tf.math.tanh(x) + /// + /// + /// + /// + /// + /// + public static Tensor tanh(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Tanh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tanh_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Tanh", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Tanh", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tanh_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Tanh", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Tanh", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient for the tanh of `x` wrt its input. + /// + /// + /// + /// Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy` + /// is the corresponding input gradient. + /// + /// + /// + /// + /// + public static Tensor tanh_grad(Tensor y, Tensor dy, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TanhGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tanh_grad_eager_fallback(y, dy, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["dy"] = dy; + var _op = tf.OpDefLib._apply_op_helper("TanhGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("TanhGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tanh_grad_eager_fallback(Tensor y, Tensor dy, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, dy }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("TanhGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TanhGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x / y element-wise for integer types. + /// + /// + /// + /// Truncation designates that negative numbers will round fractional quantities + /// toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different + /// than Python semantics. See `FloorDiv` for a division function that matches + /// Python Semantics. + /// + /// *NOTE*: `TruncateDiv` supports broadcasting. 
More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor truncate_div(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TruncateDiv", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return truncate_div_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("TruncateDiv", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("TruncateDiv", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor truncate_div_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("TruncateDiv", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TruncateDiv", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns element-wise remainder of division. This emulates C semantics in that + /// + /// + /// + /// the result here is consistent with a truncating divide. E.g. `truncate(x / y) * + /// y + truncate_mod(x, y) = x`. + /// + /// *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor truncate_mod(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TruncateMod", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return truncate_mod_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("TruncateMod", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("TruncateMod", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor truncate_mod_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("TruncateMod", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TruncateMod", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the maximum along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. 
+    /// 
+    /// This operator is similar to `tf.math.unsorted_segment_sum`. Instead of computing
+    /// the sum over segments, it computes the maximum such that:
+    /// 
+    /// \(output_i = \max_{j...} data[j...]\) where max is over tuples `j...` such
+    /// that `segment_ids[j...] == i`.
+    /// 
+    /// If the maximum is empty for a given segment ID `i`, it outputs the smallest
+    /// possible value for the specific numeric type,
+    /// `output[i] = numeric_limits<T>::lowest()`.
+    /// 
+    /// If the given segment ID `i` is negative, then the corresponding value is
+    /// dropped, and will not be included in the result.
+    /// 
+    /// Caution: On CPU, values in `segment_ids` are always validated to be less than
+    /// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
+    /// does not throw an error for out-of-bound indices; instead, out-of-bound indices
+    /// result in safe but unspecified behavior, which may include ignoring
+    /// out-of-bound indices or outputting a tensor with a 0 stored in the first
+    /// dimension of its shape if `num_segments` is 0.
+    /// 
+    /// For example:
+    /// 
+    /// >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
+    /// >>> tf.math.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2).numpy()
+    /// array([[4, 3, 3, 4],
+    ///        [5, 6, 7, 8]], dtype=int32)
+    /// 
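+    /// A roughly equivalent call through this generated C# wrapper (a sketch; it assumes
+    /// `using static Tensorflow.Binding;` and passes `num_segments` as a scalar tensor,
+    /// since the wrapper below takes only `Tensor` arguments):
+    /// 
+    ///     var c   = tf.constant(new int[,] { { 1, 2, 3, 4 }, { 5, 6, 7, 8 }, { 4, 3, 2, 1 } });
+    ///     var ids = tf.constant(new int[] { 0, 1, 0 });
+    ///     var max = gen_math_ops.unsorted_segment_max(c, ids, tf.constant(2));
+    ///     // expected: [[4, 3, 3, 4], [5, 6, 7, 8]]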
+ /// + /// + /// + /// + public static Tensor unsorted_segment_max(Tensor data, Tensor segment_ids, Tensor num_segments, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentMax", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return unsorted_segment_max_eager_fallback(data, segment_ids, num_segments, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + keywords["num_segments"] = num_segments; + var _op = tf.OpDefLib._apply_op_helper("UnsortedSegmentMax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices"), "Tnumsegments", _op._get_attr_type("Tnumsegments") }; + _execute.record_gradient("UnsortedSegmentMax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor unsorted_segment_max_eager_fallback(Tensor data, Tensor segment_ids, Tensor num_segments, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids, num_segments }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype, "Tnumsegments", num_segments.dtype }; + var _result = _execute.execute("UnsortedSegmentMax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UnsortedSegmentMax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the minimum along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// This operator is similar to `tf.math.unsorted_segment_sum`, + /// Instead of computing the sum over segments, it computes the minimum such that: + /// + /// \(output_i = min_{j...} data_[j...]\) where min is over tuples `j...` such + /// that `segment_ids[j...] == i`. + /// + /// If the minimum is empty for a given segment ID `i`, it outputs the largest + /// possible value for the specific numeric type, + /// `output[i] = numeric_limits::max()`. + /// + /// For example: + /// + /// >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + /// >>> tf.math.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + /// array([[1, 2, 2, 1], + /// [5, 6, 7, 8]], dtype=int32) + /// + /// If the given segment ID `i` is negative, then the corresponding value is + /// dropped, and will not be included in the result. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be less than + /// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + /// does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices + /// result in safe but unspecified behavior, which may include ignoring + /// out-of-bound indices or outputting a tensor with a 0 stored in the first + /// dimension of its shape if `num_segments` is 0. + /// + /// + /// + /// + /// + /// + public static Tensor unsorted_segment_min(Tensor data, Tensor segment_ids, Tensor num_segments, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentMin", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return unsorted_segment_min_eager_fallback(data, segment_ids, num_segments, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + keywords["num_segments"] = num_segments; + var _op = tf.OpDefLib._apply_op_helper("UnsortedSegmentMin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices"), "Tnumsegments", _op._get_attr_type("Tnumsegments") }; + _execute.record_gradient("UnsortedSegmentMin", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor unsorted_segment_min_eager_fallback(Tensor data, Tensor segment_ids, Tensor num_segments, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids, num_segments }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype, "Tnumsegments", num_segments.dtype }; + var _result = _execute.execute("UnsortedSegmentMin", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UnsortedSegmentMin", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the product along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// This operator is similar to `tf.math.unsorted_segment_sum`, + /// Instead of computing the sum over segments, it computes the product of all + /// entries belonging to a segment such that: + /// + /// \(output_i = prod_{j...} data[j...]\) where the product is over tuples + /// `j...` such that `segment_ids[j...] == i`. + /// + /// For example: + /// + /// >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + /// >>> tf.math.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + /// array([[4, 6, 6, 4], + /// [5, 6, 7, 8]], dtype=int32) + /// + /// If there is no entry for a given segment ID `i`, it outputs 1. + /// + /// If the given segment ID `i` is negative, then the corresponding value is + /// dropped, and will not be included in the result. + /// Caution: On CPU, values in `segment_ids` are always validated to be less than + /// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + /// does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices + /// result in safe but unspecified behavior, which may include ignoring + /// out-of-bound indices or outputting a tensor with a 0 stored in the first + /// dimension of its shape if `num_segments` is 0. + /// + /// + /// + /// + /// + /// + /// + public static Tensor unsorted_segment_prod(Tensor data, Tensor segment_ids, Tensor num_segments, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentProd", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return unsorted_segment_prod_eager_fallback(data, segment_ids, num_segments, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + keywords["num_segments"] = num_segments; + var _op = tf.OpDefLib._apply_op_helper("UnsortedSegmentProd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices"), "Tnumsegments", _op._get_attr_type("Tnumsegments") }; + _execute.record_gradient("UnsortedSegmentProd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor unsorted_segment_prod_eager_fallback(Tensor data, Tensor segment_ids, Tensor num_segments, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids, num_segments }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype, "Tnumsegments", num_segments.dtype }; + var _result = _execute.execute("UnsortedSegmentProd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UnsortedSegmentProd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the sum along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \(output[i] = sum_{j...} data[j...]\) where the sum is over tuples `j...` such + /// that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids` + /// need not be sorted and need not cover all values in the full + /// range of valid values. + /// + /// If the sum is empty for a given segment ID `i`, `output[i] = 0`. + /// If the given segment ID `i` is negative, the value is dropped and will not be + /// added to the sum of the segment. + /// + /// `num_segments` should equal the number of distinct segment IDs. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be less than + /// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + /// does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices + /// result in safe but unspecified behavior, which may include ignoring + /// out-of-bound indices or outputting a tensor with a 0 stored in the first + /// dimension of its shape if `num_segments` is 0. + /// + ///
+    /// 
+    /// >>> c = [[1,2,3,4], [5,6,7,8], [4,3,2,1]]
+    /// >>> tf.math.unsorted_segment_sum(c, [0, 1, 0], num_segments=2).numpy()
+    /// array([[5, 5, 5, 5],
+    ///        [5, 6, 7, 8]], dtype=int32)
+    /// 
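+    /// The same example expressed against this wrapper might look like the following
+    /// sketch (assuming `using static Tensorflow.Binding;`; all three arguments are
+    /// materialized as tensors because the generated signature accepts only `Tensor`):
+    /// 
+    ///     var c   = tf.constant(new int[,] { { 1, 2, 3, 4 }, { 5, 6, 7, 8 }, { 4, 3, 2, 1 } });
+    ///     var ids = tf.constant(new int[] { 0, 1, 0 });
+    ///     var sum = gen_math_ops.unsorted_segment_sum(c, ids, tf.constant(2));
+    ///     // expected: [[5, 5, 5, 5], [5, 6, 7, 8]]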
+ /// + /// + /// + /// + public static Tensor unsorted_segment_sum(Tensor data, Tensor segment_ids, Tensor num_segments, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentSum", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return unsorted_segment_sum_eager_fallback(data, segment_ids, num_segments, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + keywords["num_segments"] = num_segments; + var _op = tf.OpDefLib._apply_op_helper("UnsortedSegmentSum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices"), "Tnumsegments", _op._get_attr_type("Tnumsegments") }; + _execute.record_gradient("UnsortedSegmentSum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor unsorted_segment_sum_eager_fallback(Tensor data, Tensor segment_ids, Tensor num_segments, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids, num_segments }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype, "Tnumsegments", num_segments.dtype }; + var _result = _execute.execute("UnsortedSegmentSum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UnsortedSegmentSum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns 0 if x == 0, and x / y otherwise, elementwise. + /// + /// + /// + /// + public static Tensor xdivy(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Xdivy", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return xdivy_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Xdivy", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Xdivy", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor xdivy_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Xdivy", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Xdivy", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. + /// + /// + /// + /// + public static Tensor xlog1py(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Xlog1py", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return xlog1py_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Xlog1py", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Xlog1py", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor xlog1py_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Xlog1py", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Xlog1py", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns 0 if x == 0, and x * log(y) otherwise, elementwise. + /// + /// + /// + /// + public static Tensor xlogy(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Xlogy", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return xlogy_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Xlogy", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Xlogy", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor xlogy_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Xlogy", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Xlogy", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the Hurwitz zeta function \\(\zeta(x, q)\\). + /// + /// + /// + /// The Hurwitz zeta function is defined as: + /// + /// + /// \(zeta(x, q) = sum_{n=0}^{infty} (q + n)^{-x}\) + /// + /// + /// + /// + /// + public static Tensor zeta(Tensor x, Tensor q, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Zeta", name) { args = new object[] { x, q }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return zeta_eager_fallback(x, q, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["q"] = q; + var _op = tf.OpDefLib._apply_op_helper("Zeta", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Zeta", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor zeta_eager_fallback(Tensor x, Tensor q, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, q }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Zeta", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Zeta", _inputs_flat, _attrs, _result); + } + return _result[0]; } } diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs deleted file mode 100644 index 8e6e72d12..000000000 --- a/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs +++ /dev/null @@ -1,11 +0,0 @@ -using System; -using static Tensorflow.Binding; - -namespace Tensorflow -{ - public static partial class gen_math_ops - { - public static Tensor mul(IntPtr x, IntPtr y, string name = null) - => tf.Context.ExecuteOp("Mul", name, new ExecuteOpArgs(x, y)); - } -} diff --git a/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs new file mode 100644 index 000000000..c0cec2785 --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs @@ -0,0 +1,8084 @@ +/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/ + +using Tensorflow.Eager; +using Tensorflow.Contexts; +using static Tensorflow.Binding; + +namespace Tensorflow; + +public static class gen_nn_ops +{ + /// + /// Returns min/max k values and their indices of the input operand in an approximate manner. + /// + /// + /// + /// See https://arxiv.org/abs/2206.14286 for the algorithm details. + /// This op is only optimized on TPU currently. + /// + /// + /// + /// + /// Specifies the number of min/max-k. + /// + /// + /// Integer dimension along which to search. Default: -1. + /// + /// + /// Recall target for the approximation. Range in (0,1] + /// + /// + /// When true, computes max-k; otherwise computes min-k. + /// + /// + /// + /// When set to a positive value, it overrides the size determined by + /// `input[reduction_dim]` for evaluating the recall. This option is useful when + /// the given `input` is only a subset of the overall computation in SPMD or + /// distributed pipelines, where the true input size cannot be deferred by the + /// `input` shape. + /// + /// + /// + /// + /// When true, aggregates approximate results to top-k. When false, returns the + /// approximate results. The number of the approximate results is implementation + /// defined and is greater equals to the specified `k`. 
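+    /// 
+    /// A minimal usage sketch (illustrative only; it assumes `using static Tensorflow.Binding;`
+    /// and eager mode; the wrapper returns two tensors, the selected values and their indices):
+    /// 
+    ///     var scores  = tf.constant(new float[] { 0.1f, 0.9f, 0.4f, 0.7f });
+    ///     var result  = gen_nn_ops.approx_top_k(scores, k: 2);
+    ///     var values  = result[0];   // approximately the largest two values
+    ///     var indices = result[1];   // their positions within `scores`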
+ /// + /// + /// + public static Tensor[] approx_top_k(Tensor input, int k = 0, int reduction_dimension = -1, float recall_target = 0.95f, bool is_max_k = true, int reduction_input_size_override = -1, bool aggregate_to_topk = true, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ApproxTopK", name) { args = new object[] { input }, attrs = new Dictionary() { ["k"] = k, ["reduction_dimension"] = reduction_dimension, ["recall_target"] = recall_target, ["is_max_k"] = is_max_k, ["reduction_input_size_override"] = reduction_input_size_override, ["aggregate_to_topk"] = aggregate_to_topk } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return approx_top_k_eager_fallback(input, k: k, reduction_dimension: reduction_dimension, recall_target: recall_target, is_max_k: is_max_k, reduction_input_size_override: reduction_input_size_override, aggregate_to_topk: aggregate_to_topk, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["k"] = k; + keywords["reduction_dimension"] = reduction_dimension; + keywords["recall_target"] = recall_target; + keywords["is_max_k"] = is_max_k; + keywords["reduction_input_size_override"] = reduction_input_size_override; + keywords["aggregate_to_topk"] = aggregate_to_topk; + var _op = tf.OpDefLib._apply_op_helper("ApproxTopK", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "k", _op._get_attr_int("k"), "reduction_dimension", _op._get_attr_int("reduction_dimension"), "recall_target", _op.get_attr("recall_target"), "is_max_k", _op._get_attr_bool("is_max_k"), "reduction_input_size_override", _op._get_attr_int("reduction_input_size_override"), "aggregate_to_topk", _op._get_attr_bool("aggregate_to_topk"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("ApproxTopK", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] approx_top_k_eager_fallback(Tensor input, int k, int reduction_dimension, float recall_target, bool is_max_k, int reduction_input_size_override, bool aggregate_to_topk, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "k", k, "reduction_dimension", reduction_dimension, "recall_target", recall_target, "is_max_k", is_max_k, "reduction_input_size_override", reduction_input_size_override, "aggregate_to_topk", aggregate_to_topk, "T", input.dtype }; + var _result = _execute.execute("ApproxTopK", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ApproxTopK", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Performs average pooling on the input. + /// + /// + /// + /// Each entry in `output` is the mean of the corresponding size `ksize` + /// window in `value`. + /// + /// + /// + /// + /// + /// The size of the sliding window for each dimension of `value`. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of `value`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. 
+ /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor avg_pool(Tensor value, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPool", name) { args = new object[] { value }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return avg_pool_eager_fallback(value, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("AvgPool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("AvgPool", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor avg_pool_eager_fallback(Tensor value, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", value.dtype }; + var _result = _execute.execute("AvgPool", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AvgPool", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs 3D average pooling on the input. + /// + /// + /// + /// Each entry in `output` is the mean of the corresponding size `ksize` window in + /// `value`. + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + public static Tensor avg_pool3d(Tensor input, int[] ksize, int[] strides, string padding, string data_format = "NDHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPool3D", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return avg_pool3d_eager_fallback(input, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("AvgPool3D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("AvgPool3D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor avg_pool3d_eager_fallback(Tensor input, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", input.dtype }; + var _result = _execute.execute("AvgPool3D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AvgPool3D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients of average pooling function. + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + public static Tensor avg_pool3d_grad(Tensor orig_input_shape, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = "NDHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPool3DGrad", name) { args = new object[] { orig_input_shape, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return avg_pool3d_grad_eager_fallback(orig_input_shape, grad, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["orig_input_shape"] = orig_input_shape; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("AvgPool3DGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("AvgPool3DGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor avg_pool3d_grad_eager_fallback(Tensor orig_input_shape, Tensor grad, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input_shape, grad }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", grad.dtype }; + var _result = _execute.execute("AvgPool3DGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AvgPool3DGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients of the average pooling function. + /// + /// + /// + /// + /// + /// The size of the sliding window for each dimension of the input. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor avg_pool_grad(Tensor orig_input_shape, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPoolGrad", name) { args = new object[] { orig_input_shape, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return avg_pool_grad_eager_fallback(orig_input_shape, grad, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["orig_input_shape"] = orig_input_shape; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("AvgPoolGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("AvgPoolGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor avg_pool_grad_eager_fallback(Tensor orig_input_shape, Tensor grad, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input_shape, grad }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", grad.dtype }; + var _result = _execute.execute("AvgPoolGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AvgPoolGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Batch normalization. + /// + /// + /// + /// This op is deprecated. Prefer `tf.nn.batch_normalization`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number to avoid dividing by 0. + /// + /// + /// + /// + /// A bool indicating whether the resulted tensor + /// needs to be multiplied with gamma. + /// + /// + /// + public static Tensor batch_norm_with_global_normalization(Tensor t, Tensor m, Tensor v, Tensor beta, Tensor gamma, float variance_epsilon, bool scale_after_normalization, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchNormWithGlobalNormalization", name) { args = new object[] { t, m, v, beta, gamma }, attrs = new Dictionary() { ["variance_epsilon"] = variance_epsilon, ["scale_after_normalization"] = scale_after_normalization } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_norm_with_global_normalization_eager_fallback(t, m, v, beta, gamma, variance_epsilon: variance_epsilon, scale_after_normalization: scale_after_normalization, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["t"] = t; + keywords["m"] = m; + keywords["v"] = v; + keywords["beta"] = beta; + keywords["gamma"] = gamma; + keywords["variance_epsilon"] = variance_epsilon; + keywords["scale_after_normalization"] = scale_after_normalization; + var _op = tf.OpDefLib._apply_op_helper("BatchNormWithGlobalNormalization", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "variance_epsilon", _op.get_attr("variance_epsilon"), "scale_after_normalization", _op._get_attr_bool("scale_after_normalization") }; + _execute.record_gradient("BatchNormWithGlobalNormalization", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor batch_norm_with_global_normalization_eager_fallback(Tensor t, Tensor m, Tensor v, Tensor beta, Tensor gamma, float variance_epsilon, bool scale_after_normalization, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { t, m, v, beta, gamma }; + object[] _attrs = new object[] { "T", t.dtype, "variance_epsilon", variance_epsilon, "scale_after_normalization", scale_after_normalization }; + var _result = _execute.execute("BatchNormWithGlobalNormalization", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gradients for batch normalization. + /// + /// + /// + /// This op is deprecated. See `tf.nn.batch_normalization`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number to avoid dividing by 0. + /// + /// + /// + /// + /// A bool indicating whether the resulted tensor + /// needs to be multiplied with gamma. + /// + /// + /// + public static Tensor[] batch_norm_with_global_normalization_grad(Tensor t, Tensor m, Tensor v, Tensor gamma, Tensor backprop, float variance_epsilon, bool scale_after_normalization, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchNormWithGlobalNormalizationGrad", name) { args = new object[] { t, m, v, gamma, backprop }, attrs = new Dictionary() { ["variance_epsilon"] = variance_epsilon, ["scale_after_normalization"] = scale_after_normalization } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return batch_norm_with_global_normalization_grad_eager_fallback(t, m, v, gamma, backprop, variance_epsilon: variance_epsilon, scale_after_normalization: scale_after_normalization, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["t"] = t; + keywords["m"] = m; + keywords["v"] = v; + keywords["gamma"] = gamma; + keywords["backprop"] = backprop; + keywords["variance_epsilon"] = variance_epsilon; + keywords["scale_after_normalization"] = scale_after_normalization; + var _op = tf.OpDefLib._apply_op_helper("BatchNormWithGlobalNormalizationGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "variance_epsilon", _op.get_attr("variance_epsilon"), "scale_after_normalization", _op._get_attr_bool("scale_after_normalization") }; + _execute.record_gradient("BatchNormWithGlobalNormalizationGrad", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] batch_norm_with_global_normalization_grad_eager_fallback(Tensor t, Tensor m, Tensor v, Tensor gamma, Tensor backprop, float variance_epsilon, bool scale_after_normalization, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { t, m, v, gamma, backprop }; + object[] _attrs = new object[] { "T", t.dtype, "variance_epsilon", variance_epsilon, "scale_after_normalization", scale_after_normalization }; + var _result = _execute.execute("BatchNormWithGlobalNormalizationGrad", 5, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchNormWithGlobalNormalizationGrad", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Adds `bias` to `value`. + /// + /// + /// + /// This is a special case of `tf.add` where `bias` is restricted to be 1-D. + /// Broadcasting is supported, so `value` may have any number of dimensions. + /// + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the bias tensor will be added to the last dimension + /// of the value tensor. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// The tensor will be added to "in_channels", the third-to-the-last + /// dimension. + /// + /// + /// + public static Tensor bias_add(Tensor value, Tensor bias, string data_format = "NHWC", string? 
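+ // Usage sketch (illustrative, assuming an eager tf.Context and a call site inside this generated ops class):
+ //     var value = tf.ones(new Shape(2, 3));                      // shape [2, 3]
+ //     var bias = tf.constant(new float[] { 0.1f, 0.2f, 0.3f });  // 1-D, length 3
+ //     var y = bias_add(value, bias);                             // bias broadcast over the last ("C") axis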
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BiasAdd", name) { args = new object[] { value, bias }, attrs = new Dictionary() { ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return bias_add_eager_fallback(value, bias, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["bias"] = bias; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("BiasAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "data_format", _op.get_attr("data_format") }; + _execute.record_gradient("BiasAdd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor bias_add_eager_fallback(Tensor value, Tensor bias, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value, bias }; + object[] _attrs = new object[] { "T", value.dtype, "data_format", data_format }; + var _result = _execute.execute("BiasAdd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BiasAdd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// The backward operation for "BiasAdd" on the "bias" tensor. + /// + /// + /// + /// It accumulates all the values from out_backprop into the feature dimension. + /// For NHWC data format, the feature dimension is the last. For NCHW data format, + /// the feature dimension is the third-to-last. + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the bias tensor will be added to the last dimension + /// of the value tensor. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// The tensor will be added to "in_channels", the third-to-the-last + /// dimension. + /// + /// + /// + public static Tensor bias_add_grad(Tensor out_backprop, string data_format = "NHWC", string? 
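+ // Illustrative note: for a 4-D NHWC out_backprop this is conceptually a sum of out_backprop over
+ // the batch and spatial axes {0, 1, 2}, leaving one value per feature channel.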
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BiasAddGrad", name) { args = new object[] { out_backprop }, attrs = new Dictionary() { ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return bias_add_grad_eager_fallback(out_backprop, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["out_backprop"] = out_backprop; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("BiasAddGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "data_format", _op.get_attr("data_format") }; + _execute.record_gradient("BiasAddGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor bias_add_grad_eager_fallback(Tensor out_backprop, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { out_backprop }; + object[] _attrs = new object[] { "T", out_backprop.dtype, "data_format", data_format }; + var _result = _execute.execute("BiasAddGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BiasAddGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Adds `bias` to `value`. + /// + /// + /// + /// This is a deprecated version of BiasAdd and will be soon removed. + /// + /// This is a special case of `tf.add` where `bias` is restricted to be 1-D. + /// Broadcasting is supported, so `value` may have any number of dimensions. + /// + /// + /// + /// + /// + public static Tensor bias_add_v1(Tensor value, Tensor bias, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BiasAddV1", name) { args = new object[] { value, bias }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return bias_add_v1_eager_fallback(value, bias, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["bias"] = bias; + var _op = tf.OpDefLib._apply_op_helper("BiasAddV1", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BiasAddV1", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor bias_add_v1_eager_fallback(Tensor value, Tensor bias, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value, bias }; + object[] _attrs = new object[] { "T", value.dtype }; + var _result = _execute.execute("BiasAddV1", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BiasAddV1", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes a 2-D convolution given 4-D `input` and `filter` tensors. 
+ /// + /// + /// + /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + /// and a filter / kernel tensor of shape + /// `[filter_height, filter_width, in_channels, out_channels]`, this op + /// performs the following: + /// + /// 1. Flattens the filter to a 2-D matrix with shape + /// `[filter_height * filter_width * in_channels, output_channels]`. + /// 2. Extracts image patches from the input tensor to form a *virtual* + /// tensor of shape `[batch, out_height, out_width, + /// filter_height * filter_width * in_channels]`. + /// 3. For each patch, right-multiplies the filter matrix and the image patch + /// vector. + /// + /// In detail, with the default NHWC format, + /// + /// output[b, i, j, k] = + /// sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * + /// filter[di, dj, q, k] + /// + /// Must have `strides[0] = strides[3] = 1`. For the most common case of the same + /// horizontal and vertices strides, `strides = [1, stride, stride, 1]`. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 4. The stride of the sliding window for each + /// dimension of `input`. The dimension order is determined by the value of + /// `data_format`, see below for details. + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith + /// dimension, the amount of padding inserted before and after the dimension is + /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If + /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, height, width, channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, channels, height, width]. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. The dimension order is determined by the + /// value of `data_format`, see above for details. Dilations in the batch and + /// depth dimensions must be 1. + /// + /// + /// + public static Tensor conv2d(Tensor input, Tensor filter, int[] strides, string padding, bool use_cudnn_on_gpu = true, int[] explicit_paddings = null, string data_format = "NHWC", int[] dilations = null, string? 
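+ // Worked shape example (illustrative): input [1, 28, 28, 3], filter [5, 5, 3, 16],
+ // strides [1, 1, 1, 1], padding "SAME"  =>  output [1, 28, 28, 16];
+ // with padding "VALID" the spatial size shrinks to 24 x 24 (28 - 5 + 1).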
name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv2D", name) { args = new object[] { input, filter }, attrs = new Dictionary() { ["strides"] = strides, ["use_cudnn_on_gpu"] = use_cudnn_on_gpu, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv2d_eager_fallback(input, filter, strides: strides, use_cudnn_on_gpu: use_cudnn_on_gpu, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["strides"] = strides; + keywords["use_cudnn_on_gpu"] = use_cudnn_on_gpu; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv2D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "use_cudnn_on_gpu", _op._get_attr_bool("use_cudnn_on_gpu"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv2D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv2d_eager_fallback(Tensor input, Tensor filter, int[] strides, bool use_cudnn_on_gpu, string padding, int[] explicit_paddings, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("Conv2D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv2D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of convolution with respect to the filter. + /// + /// + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// of the convolution. Must be in the same order as the dimension specified with + /// format. + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith + /// dimension, the amount of padding inserted before and after the dimension is + /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If + /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + /// + /// + /// + /// + /// Specify the data format of the input and output data. 
With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// + /// + public static Tensor conv2d_backprop_filter(Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, bool use_cudnn_on_gpu = true, int[] explicit_paddings = null, string data_format = "NHWC", int[] dilations = null, string? name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv2DBackpropFilter", name) { args = new object[] { input, filter_sizes, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["use_cudnn_on_gpu"] = use_cudnn_on_gpu, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv2d_backprop_filter_eager_fallback(input, filter_sizes, out_backprop, strides: strides, use_cudnn_on_gpu: use_cudnn_on_gpu, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter_sizes"] = filter_sizes; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["use_cudnn_on_gpu"] = use_cudnn_on_gpu; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv2DBackpropFilter", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "use_cudnn_on_gpu", _op._get_attr_bool("use_cudnn_on_gpu"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv2DBackpropFilter", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv2d_backprop_filter_eager_fallback(Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, bool use_cudnn_on_gpu, string padding, int[] explicit_paddings, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter_sizes, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "dilations", dilations }; + var _result = 
_execute.execute("Conv2DBackpropFilter", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv2DBackpropFilter", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of convolution with respect to the input. + /// + /// + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// of the convolution. Must be in the same order as the dimension specified with + /// format. + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith + /// dimension, the amount of padding inserted before and after the dimension is + /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If + /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// + /// + public static Tensor conv2d_backprop_input(Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, bool use_cudnn_on_gpu = true, int[] explicit_paddings = null, string data_format = "NHWC", int[] dilations = null, string? 
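+ // Usage sketch (hypothetical values): input_sizes is a 1-D int32 tensor holding the shape of the
+ // original forward-pass input, e.g.
+ //     var dx = conv2d_backprop_input(tf.constant(new int[] { 1, 28, 28, 3 }), filter, out_backprop,
+ //                                    new[] { 1, 1, 1, 1 }, "SAME");
+ // This op is also the primitive commonly used to implement transposed ("deconvolution") layers.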
name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv2DBackpropInput", name) { args = new object[] { input_sizes, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["use_cudnn_on_gpu"] = use_cudnn_on_gpu, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv2d_backprop_input_eager_fallback(input_sizes, filter, out_backprop, strides: strides, use_cudnn_on_gpu: use_cudnn_on_gpu, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input_sizes"] = input_sizes; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["use_cudnn_on_gpu"] = use_cudnn_on_gpu; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv2DBackpropInput", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "use_cudnn_on_gpu", _op._get_attr_bool("use_cudnn_on_gpu"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv2DBackpropInput", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv2d_backprop_input_eager_fallback(Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, bool use_cudnn_on_gpu, string padding, int[] explicit_paddings, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_sizes, filter, out_backprop }; + object[] _attrs = new object[] { "T", filter.dtype, "strides", strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("Conv2DBackpropInput", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv2DBackpropInput", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes a 3-D convolution given 5-D `input` and `filter` tensors. + /// + /// + /// + /// In signal processing, cross-correlation is a measure of similarity of + /// two waveforms as a function of a time-lag applied to one of them. This + /// is also known as a sliding dot product or sliding inner-product. + /// + /// Our Conv3D implements a form of cross-correlation. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. 
+ /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + /// + /// 1-D tensor of length 5. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. The dimension order is determined by the + /// value of `data_format`, see above for details. Dilations in the batch and + /// depth dimensions must be 1. + /// + /// + /// + public static Tensor conv3d(Tensor input, Tensor filter, int[] strides, string padding, string data_format = "NDHWC", int[] dilations = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3D", name) { args = new object[] { input, filter }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv3d_eager_fallback(input, filter, strides: strides, padding: padding, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv3D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv3D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv3d_eager_fallback(Tensor input, Tensor filter, int[] strides, string padding, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "padding", padding, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("Conv3D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv3D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of 3-D convolution with respect to the filter. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + public static Tensor conv3d_backprop_filter(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] dilations = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropFilter", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv3d_backprop_filter_eager_fallback(input, filter, out_backprop, strides: strides, padding: padding, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv3DBackpropFilter", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv3DBackpropFilter", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv3d_backprop_filter_eager_fallback(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "padding", padding, "dilations", dilations }; + var _result = _execute.execute("Conv3DBackpropFilter", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv3DBackpropFilter", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of 3-D convolution with respect to the filter. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + /// + /// 1-D tensor of length 5. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. The dimension order is determined by the + /// value of `data_format`, see above for details. Dilations in the batch and + /// depth dimensions must be 1. + /// + /// + /// + public static Tensor conv3d_backprop_filter_v2(Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, string data_format = "NDHWC", int[] dilations = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropFilterV2", name) { args = new object[] { input, filter_sizes, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv3d_backprop_filter_v2_eager_fallback(input, filter_sizes, out_backprop, strides: strides, padding: padding, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter_sizes"] = filter_sizes; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv3DBackpropFilterV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv3DBackpropFilterV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv3d_backprop_filter_v2_eager_fallback(Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter_sizes, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "padding", padding, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("Conv3DBackpropFilterV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv3DBackpropFilterV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of 3-D convolution with respect to the input. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + public static Tensor conv3d_backprop_input(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] dilations = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropInput", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv3d_backprop_input_eager_fallback(input, filter, out_backprop, strides: strides, padding: padding, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv3DBackpropInput", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv3DBackpropInput", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv3d_backprop_input_eager_fallback(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "padding", padding, "dilations", dilations }; + var _result = _execute.execute("Conv3DBackpropInput", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv3DBackpropInput", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of 3-D convolution with respect to the input. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + /// + /// 1-D tensor of length 5. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. The dimension order is determined by the + /// value of `data_format`, see above for details. Dilations in the batch and + /// depth dimensions must be 1. + /// + /// + /// + public static Tensor conv3d_backprop_input_v2(Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, string data_format = "NDHWC", int[] dilations = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropInputV2", name) { args = new object[] { input_sizes, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv3d_backprop_input_v2_eager_fallback(input_sizes, filter, out_backprop, strides: strides, padding: padding, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["input_sizes"] = input_sizes; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv3DBackpropInputV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations"), "Tshape", _op._get_attr_type("Tshape") }; + _execute.record_gradient("Conv3DBackpropInputV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv3d_backprop_input_v2_eager_fallback(Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_sizes, filter, out_backprop }; + object[] _attrs = new object[] { "T", filter.dtype, "strides", strides, "padding", padding, "data_format", data_format, "dilations", dilations, "Tshape", input_sizes.dtype }; + var _result = _execute.execute("Conv3DBackpropInputV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv3DBackpropInputV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the dimension index in the destination data format given the one in + /// + /// + /// + /// the source data format. + /// + /// + /// + /// + /// + /// source data format. + /// + /// + /// + /// + /// destination data format. + /// + /// + /// + public static Tensor data_format_dim_map(Tensor x, string src_format = "NHWC", string dst_format = "NCHW", string? 
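+ // Illustrative mapping (added comment): with src_format "NHWC" and dst_format "NCHW",
+ //     x = [0, 1, 2, 3]  =>  [0, 2, 3, 1]
+ // (N stays at 0, H moves to index 2, W to 3, C to 1); negative indices are interpreted modulo the rank.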
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DataFormatDimMap", name) { args = new object[] { x }, attrs = new Dictionary() { ["src_format"] = src_format, ["dst_format"] = dst_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return data_format_dim_map_eager_fallback(x, src_format: src_format, dst_format: dst_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (src_format is null) + { + src_format = "NHWC"; + } + if (dst_format is null) + { + dst_format = "NCHW"; + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["src_format"] = src_format; + keywords["dst_format"] = dst_format; + var _op = tf.OpDefLib._apply_op_helper("DataFormatDimMap", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "src_format", _op.get_attr("src_format"), "dst_format", _op.get_attr("dst_format") }; + _execute.record_gradient("DataFormatDimMap", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor data_format_dim_map_eager_fallback(Tensor x, string src_format, string dst_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype, "src_format", src_format, "dst_format", dst_format }; + var _result = _execute.execute("DataFormatDimMap", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DataFormatDimMap", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Permute input tensor from `src_format` to `dst_format`. + /// + /// + /// + /// Given source and destination format strings of length n=4 or 5, the input + /// tensor must be a vector of size n or n-2, or a 2D tensor of shape + /// (n, 2) or (n-2, 2). + /// + /// If the first dimension of the input tensor is n-2, it is assumed that + /// non-spatial dimensions are omitted (i.e `N`, `C`). + /// + /// For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and input: + /// ``` + /// [1, 2, 3, 4] + /// ``` + /// , the output will be: + /// ``` + /// [1, 4, 2, 3] + /// ``` + /// With `src_format` of `NDHWC`, `dst_format` of `NCDHW`, and input: + /// ``` + /// [[1, 6], [2, 7], [3, 8], [4, 9], [5, 10]] + /// ``` + /// , the output will be: + /// ``` + /// [[1, 6], [5, 10], [2, 7], [3, 8], [4, 9]] + /// ``` + /// With `src_format` of `NHWC`, `dst_format` of `NCHW`, and input: + /// ``` + /// [1, 2] + /// ``` + /// , the output will be: + /// ``` + /// [1, 2] + /// ``` + /// + /// + /// + /// + /// + /// source data format. + /// + /// + /// + /// + /// destination data format. + /// + /// + /// + public static Tensor data_format_vec_permute(Tensor x, string src_format = "NHWC", string dst_format = "NCHW", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DataFormatVecPermute", name) { args = new object[] { x }, attrs = new Dictionary() { ["src_format"] = src_format, ["dst_format"] = dst_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return data_format_vec_permute_eager_fallback(x, src_format: src_format, dst_format: dst_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (src_format is null) + { + src_format = "NHWC"; + } + if (dst_format is null) + { + dst_format = "NCHW"; + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["src_format"] = src_format; + keywords["dst_format"] = dst_format; + var _op = tf.OpDefLib._apply_op_helper("DataFormatVecPermute", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "src_format", _op.get_attr("src_format"), "dst_format", _op.get_attr("dst_format") }; + _execute.record_gradient("DataFormatVecPermute", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor data_format_vec_permute_eager_fallback(Tensor x, string src_format, string dst_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype, "src_format", src_format, "dst_format", dst_format }; + var _result = _execute.execute("DataFormatVecPermute", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DataFormatVecPermute", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. + /// + /// + /// + /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + /// and a filter / kernel tensor of shape + /// `[filter_height, filter_width, in_channels, channel_multiplier]`, containing + /// `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies + /// a different filter to each input channel (expanding from 1 channel to + /// `channel_multiplier` channels for each), then concatenates the results + /// together. Thus, the output has `in_channels * channel_multiplier` channels. + /// + /// ``` + /// for k in 0..in_channels-1 + /// for q in 0..channel_multiplier-1 + /// output[b, i, j, k * channel_multiplier + q] = + /// sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * + /// filter[di, dj, k, q] + /// ``` + /// + /// Must have `strides[0] = strides[3] = 1`. For the most common case of the same + /// horizontal and vertices strides, `strides = [1, stride, stride, 1]`. + /// + /// + /// + /// + /// + /// + /// 1-D of length 4. The stride of the sliding window for each dimension + /// of `input`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, height, width, channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, channels, height, width]. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. 
If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// + /// + public static Tensor depthwise_conv2d_native(Tensor input, Tensor filter, int[] strides, string padding, int[] explicit_paddings = null, string data_format = "NHWC", int[] dilations = null, string? name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthwiseConv2dNative", name) { args = new object[] { input, filter }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return depthwise_conv2d_native_eager_fallback(input, filter, strides: strides, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("DepthwiseConv2dNative", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("DepthwiseConv2dNative", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor depthwise_conv2d_native_eager_fallback(Tensor input, Tensor filter, int[] strides, string padding, int[] explicit_paddings, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("DepthwiseConv2dNative", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DepthwiseConv2dNative", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of depthwise convolution with respect to the filter. + /// + /// + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// of the convolution. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, height, width, channels]. 
+ /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, channels, height, width]. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// + /// + public static Tensor depthwise_conv2d_native_backprop_filter(Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, int[] explicit_paddings = null, string data_format = "NHWC", int[] dilations = null, string? name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthwiseConv2dNativeBackpropFilter", name) { args = new object[] { input, filter_sizes, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return depthwise_conv2d_native_backprop_filter_eager_fallback(input, filter_sizes, out_backprop, strides: strides, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter_sizes"] = filter_sizes; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("DepthwiseConv2dNativeBackpropFilter", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("DepthwiseConv2dNativeBackpropFilter", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor depthwise_conv2d_native_backprop_filter_eager_fallback(Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, int[] explicit_paddings, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter_sizes, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("DepthwiseConv2dNativeBackpropFilter", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DepthwiseConv2dNativeBackpropFilter", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of 
depthwise convolution with respect to the input. + /// + /// + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// of the convolution. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, height, width, channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, channels, height, width]. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// + /// + public static Tensor depthwise_conv2d_native_backprop_input(Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] explicit_paddings = null, string data_format = "NHWC", int[] dilations = null, string? name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthwiseConv2dNativeBackpropInput", name) { args = new object[] { input_sizes, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return depthwise_conv2d_native_backprop_input_eager_fallback(input_sizes, filter, out_backprop, strides: strides, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input_sizes"] = input_sizes; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("DepthwiseConv2dNativeBackpropInput", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("DepthwiseConv2dNativeBackpropInput", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor depthwise_conv2d_native_backprop_input_eager_fallback(Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] explicit_paddings, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_sizes, filter, out_backprop }; + object[] _attrs = new object[] { "T", filter.dtype, "strides", strides, "padding", 
padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("DepthwiseConv2dNativeBackpropInput", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DepthwiseConv2dNativeBackpropInput", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. + /// + /// + /// + /// The `input` tensor has shape `[batch, in_height, in_width, depth]` and the + /// `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each + /// input channel is processed independently of the others with its own structuring + /// function. The `output` tensor has shape + /// `[batch, out_height, out_width, depth]`. The spatial dimensions of the output + /// tensor depend on the `padding` algorithm. We currently only support the default + /// "NHWC" `data_format`. + /// + /// In detail, the grayscale morphological 2-D dilation is the max-sum correlation + /// (for consistency with `conv2d`, we use unmirrored filters): + /// + /// output[b, y, x, c] = + /// max_{dy, dx} input[b, + /// strides[1] * y + rates[1] * dy, + /// strides[2] * x + rates[2] * dx, + /// c] + + /// filter[dy, dx, c] + /// + /// Max-pooling is a special case when the filter has size equal to the pooling + /// kernel size and contains all zeros. + /// + /// Note on duality: The dilation of `input` by the `filter` is equal to the + /// negation of the erosion of `-input` by the reflected `filter`. + /// + /// + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// tensor. Must be: `[1, stride_height, stride_width, 1]`. + /// + /// + /// + /// + /// The input stride for atrous morphological dilation. Must be: + /// `[1, rate_height, rate_width, 1]`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + public static Tensor dilation2d(Tensor input, Tensor filter, int[] strides, int[] rates, string padding, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dilation2D", name) { args = new object[] { input, filter }, attrs = new Dictionary() { ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return dilation2d_eager_fallback(input, filter, strides: strides, rates: rates, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["strides"] = strides; + keywords["rates"] = rates; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("Dilation2D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "rates", _op.get_attr("rates"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("Dilation2D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor dilation2d_eager_fallback(Tensor input, Tensor filter, int[] strides, int[] rates, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "rates", rates, "padding", padding }; + var _result = _execute.execute("Dilation2D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Dilation2D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient of morphological 2-D dilation with respect to the filter. + /// + /// + /// + /// + /// + /// + /// 1-D of length 4. The stride of the sliding window for each dimension of + /// the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + /// + /// + /// + /// + /// 1-D of length 4. The input stride for atrous morphological dilation. + /// Must be: `[1, rate_height, rate_width, 1]`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + public static Tensor dilation2d_backprop_filter(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, int[] rates, string padding, string? 
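// Illustrative note (not part of the generated patch): a hedged sketch of the dilation2d wrapper above.
// With a zero-valued filter whose spatial size equals the window, the max-sum correlation reduces to
// max pooling; e.g. a row [2, 5, 3, 1] with a width-2 zero filter and stride 1 yields [5, 5, 3].
//
//     var y = dilation2d(input, filter,                  // input: 4-D NHWC, filter: [h, w, depth]
//         strides: new[] { 1, 1, 1, 1 },
//         rates: new[] { 1, 1, 1, 1 },                   // rates > 1 give atrous (dilated) windows
//         padding: "VALID");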
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dilation2DBackpropFilter", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return dilation2d_backprop_filter_eager_fallback(input, filter, out_backprop, strides: strides, rates: rates, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["rates"] = rates; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("Dilation2DBackpropFilter", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "rates", _op.get_attr("rates"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("Dilation2DBackpropFilter", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor dilation2d_backprop_filter_eager_fallback(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, int[] rates, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "rates", rates, "padding", padding }; + var _result = _execute.execute("Dilation2DBackpropFilter", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Dilation2DBackpropFilter", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient of morphological 2-D dilation with respect to the input. + /// + /// + /// + /// + /// + /// + /// 1-D of length 4. The stride of the sliding window for each dimension of + /// the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + /// + /// + /// + /// + /// 1-D of length 4. The input stride for atrous morphological dilation. + /// Must be: `[1, rate_height, rate_width, 1]`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + public static Tensor dilation2d_backprop_input(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, int[] rates, string padding, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dilation2DBackpropInput", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return dilation2d_backprop_input_eager_fallback(input, filter, out_backprop, strides: strides, rates: rates, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["rates"] = rates; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("Dilation2DBackpropInput", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "rates", _op.get_attr("rates"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("Dilation2DBackpropInput", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor dilation2d_backprop_input_eager_fallback(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, int[] rates, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "rates", rates, "padding", padding }; + var _result = _execute.execute("Dilation2DBackpropInput", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Dilation2DBackpropInput", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the exponential linear function. + /// + /// + /// + /// The ELU function is defined as: + /// + /// * $ e ^ x - 1 $ if $ x < 0 $ + /// * $ x $ if $ x >= 0 $ + /// + /// Examples: + /// + /// >>> tf.nn.elu(1.0) + /// + /// >>> tf.nn.elu(0.0) + /// + /// >>> tf.nn.elu(-1000.0) + /// + /// + /// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) + /// ](http://arxiv.org/abs/1511.07289) + /// + /// + /// + /// + public static Tensor elu(Tensor features, string? 
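// Illustrative note (not part of the generated patch): a hedged sketch of this elu wrapper, using the
// definition from its summary (x for x >= 0, e^x - 1 for x < 0); `tf.constant` is assumed from the
// TensorFlow.NET binding.
//
//     var y = elu(tf.constant(new float[] { 1.0f, 0.0f, -1000.0f }));
//     // expected values: approximately [1.0, 0.0, -1.0]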
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Elu", name) { args = new object[] { features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return elu_eager_fallback(features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("Elu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Elu", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor elu_eager_fallback(Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("Elu", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Elu", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients for the exponential linear (Elu) operation. + /// + /// + /// + /// + public static Tensor elu_grad(Tensor gradients, Tensor outputs, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EluGrad", name) { args = new object[] { gradients, outputs }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return elu_grad_eager_fallback(gradients, outputs, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["outputs"] = outputs; + var _op = tf.OpDefLib._apply_op_helper("EluGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("EluGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor elu_grad_eager_fallback(Tensor gradients, Tensor outputs, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, outputs }; + object[] _attrs = new object[] { "T", gradients.dtype }; + var _result = _execute.execute("EluGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("EluGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs fractional average pooling on the input. + /// + /// + /// + /// Fractional average pooling is similar to Fractional max pooling in the pooling + /// region generation step. The only difference is that after pooling regions are + /// generated, a mean operation is performed instead of a max operation in each + /// pooling region. + /// + /// + /// + /// + /// + /// Pooling ratio for each dimension of `value`, currently only + /// supports row and col dimension and should be >= 1.0. For example, a valid + /// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements + /// must be 1.0 because we don't allow pooling on batch and channels + /// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions + /// respectively. 
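// Illustrative note (not part of the generated patch): a hedged sketch of the fractional_avg_pool
// wrapper defined below. Per the description above, pooling_ratio must keep 1.0 in the batch and
// channel slots; the wrapper returns several tensors, with the pooled output first (the remaining
// outputs are the row/col pooling sequences in the underlying TF op, an assumption not spelled out here).
//
//     var results = fractional_avg_pool(value,
//         pooling_ratio: new[] { 1.0f, 1.44f, 1.73f, 1.0f },
//         pseudo_random: true, overlapping: true);
//     var output = results[0];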
+ /// + /// + /// + /// + /// When set to True, generates the pooling sequence in a + /// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin + /// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for + /// difference between pseudorandom and random. + /// + /// + /// + /// + /// When set to True, it means when pooling, the values at the boundary + /// of adjacent pooling cells are used by both cells. For example: + /// + /// `index 0 1 2 3 4` + /// + /// `value 20 5 16 3 7` + /// + /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + /// The result would be [41/3, 26/3] for fractional avg pooling. + /// + /// + /// + /// + /// When set to True, a fixed pooling region will be used when + /// iterating over a FractionalAvgPool node in the computation graph. Mainly used + /// in unit test to make FractionalAvgPool deterministic. + /// + /// + /// + /// + /// If either seed or seed2 are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// + /// + /// An second seed to avoid seed collision. + /// + /// + /// + public static Tensor[] fractional_avg_pool(Tensor value, float[] pooling_ratio, bool pseudo_random = false, bool overlapping = false, bool deterministic = false, int seed = 0, int seed2 = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalAvgPool", name) { args = new object[] { value }, attrs = new Dictionary() { ["pooling_ratio"] = pooling_ratio, ["pseudo_random"] = pseudo_random, ["overlapping"] = overlapping, ["deterministic"] = deterministic, ["seed"] = seed, ["seed2"] = seed2 } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fractional_avg_pool_eager_fallback(value, pooling_ratio: pooling_ratio, pseudo_random: pseudo_random, overlapping: overlapping, deterministic: deterministic, seed: seed, seed2: seed2, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["pooling_ratio"] = pooling_ratio; + keywords["pseudo_random"] = pseudo_random; + keywords["overlapping"] = overlapping; + keywords["deterministic"] = deterministic; + keywords["seed"] = seed; + keywords["seed2"] = seed2; + var _op = tf.OpDefLib._apply_op_helper("FractionalAvgPool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "pooling_ratio", _op.get_attr("pooling_ratio"), "pseudo_random", _op._get_attr_bool("pseudo_random"), "overlapping", _op._get_attr_bool("overlapping"), "deterministic", _op._get_attr_bool("deterministic"), "seed", _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("FractionalAvgPool", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fractional_avg_pool_eager_fallback(Tensor value, float[] pooling_ratio, bool pseudo_random, bool overlapping, bool deterministic, int seed, int seed2, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value }; + object[] _attrs = new object[] { "pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random, "overlapping", overlapping, "deterministic", deterministic, "seed", seed, "seed2", seed2, "T", value.dtype }; + var _result = 
_execute.execute("FractionalAvgPool", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FractionalAvgPool", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes gradient of the FractionalAvgPool function. + /// + /// + /// + /// Unlike FractionalMaxPoolGrad, we don't need to find arg_max for + /// FractionalAvgPoolGrad, we just need to evenly back-propagate each element of + /// out_backprop to those indices that form the same pooling cell. Therefore, we + /// just need to know the shape of original input tensor, instead of the whole + /// tensor. + /// + /// + /// + /// + /// + /// + /// + /// + /// When set to True, it means when pooling, the values at the boundary + /// of adjacent pooling cells are used by both cells. For example: + /// + /// `index 0 1 2 3 4` + /// + /// `value 20 5 16 3 7` + /// + /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + /// The result would be [41/3, 26/3] for fractional avg pooling. + /// + /// + /// + public static Tensor fractional_avg_pool_grad(Tensor orig_input_tensor_shape, Tensor out_backprop, Tensor row_pooling_sequence, Tensor col_pooling_sequence, bool overlapping = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalAvgPoolGrad", name) { args = new object[] { orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence }, attrs = new Dictionary() { ["overlapping"] = overlapping } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fractional_avg_pool_grad_eager_fallback(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping: overlapping, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["orig_input_tensor_shape"] = orig_input_tensor_shape; + keywords["out_backprop"] = out_backprop; + keywords["row_pooling_sequence"] = row_pooling_sequence; + keywords["col_pooling_sequence"] = col_pooling_sequence; + keywords["overlapping"] = overlapping; + var _op = tf.OpDefLib._apply_op_helper("FractionalAvgPoolGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "overlapping", _op._get_attr_bool("overlapping"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("FractionalAvgPoolGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fractional_avg_pool_grad_eager_fallback(Tensor orig_input_tensor_shape, Tensor out_backprop, Tensor row_pooling_sequence, Tensor col_pooling_sequence, bool overlapping, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence }; + object[] _attrs = new object[] { "overlapping", overlapping, "T", out_backprop.dtype }; + var _result = _execute.execute("FractionalAvgPoolGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FractionalAvgPoolGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs fractional max pooling on the input. + /// + /// + /// + /// Fractional max pooling is slightly different than regular max pooling. 
In + /// regular max pooling, you downsize an input set by taking the maximum value of + /// smaller N x N subsections of the set (often 2x2), and try to reduce the set by + /// a factor of N, where N is an integer. Fractional max pooling, as you might + /// expect from the word "fractional", means that the overall reduction ratio N + /// does not have to be an integer. + /// + /// The sizes of the pooling regions are generated randomly but are fairly uniform. + /// For example, let's look at the height dimension, and the constraints on the + /// list of rows that will be pool boundaries. + /// + /// First we define the following: + /// + /// 1. input_row_length : the number of rows from the input set + /// 2. output_row_length : which will be smaller than the input + /// 3. alpha = input_row_length / output_row_length : our reduction ratio + /// 4. K = floor(alpha) + /// 5. row_pooling_sequence : this is the result list of pool boundary rows + /// + /// Then, row_pooling_sequence should satisfy: + /// + /// 1. a[0] = 0 : the first value of the sequence is 0 + /// 2. a[end] = input_row_length : the last value of the sequence is the size + /// 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size + /// 4. length(row_pooling_sequence) = output_row_length+1 + /// + /// For more details on fractional max pooling, see this paper: + /// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) + /// + /// + /// + /// + /// + /// Pooling ratio for each dimension of `value`, currently only + /// supports row and col dimension and should be >= 1.0. For example, a valid + /// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements + /// must be 1.0 because we don't allow pooling on batch and channels + /// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions + /// respectively. + /// + /// + /// + /// + /// When set to True, generates the pooling sequence in a + /// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin + /// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for + /// difference between pseudorandom and random. + /// + /// + /// + /// + /// When set to True, it means when pooling, the values at the boundary + /// of adjacent pooling cells are used by both cells. For example: + /// + /// `index 0 1 2 3 4` + /// + /// `value 20 5 16 3 7` + /// + /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + /// The result would be [20, 16] for fractional max pooling. + /// + /// + /// + /// + /// When set to True, a fixed pooling region will be used when + /// iterating over a FractionalMaxPool node in the computation graph. Mainly used + /// in unit test to make FractionalMaxPool deterministic. + /// + /// + /// + /// + /// If either seed or seed2 are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// + /// + /// An second seed to avoid seed collision. + /// + /// + /// + public static Tensor[] fractional_max_pool(Tensor value, float[] pooling_ratio, bool pseudo_random = false, bool overlapping = false, bool deterministic = false, int seed = 0, int seed2 = 0, string? 
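// Illustrative note (not part of the generated patch): a small worked example of the row pooling
// sequence constraints described above. With input_row_length = 5 and output_row_length = 3,
// alpha = 5/3 (about 1.67) and K = floor(alpha) = 1, so [0, 2, 3, 5] is a valid row_pooling_sequence:
// it starts at 0, ends at 5, its intervals (2, 1, 2) are all K or K+1, and its length is
// output_row_length + 1 = 4. A hedged call sketch:
//
//     var results = fractional_max_pool(value,
//         pooling_ratio: new[] { 1.0f, 1.67f, 1.67f, 1.0f },
//         pseudo_random: true);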
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalMaxPool", name) { args = new object[] { value }, attrs = new Dictionary() { ["pooling_ratio"] = pooling_ratio, ["pseudo_random"] = pseudo_random, ["overlapping"] = overlapping, ["deterministic"] = deterministic, ["seed"] = seed, ["seed2"] = seed2 } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fractional_max_pool_eager_fallback(value, pooling_ratio: pooling_ratio, pseudo_random: pseudo_random, overlapping: overlapping, deterministic: deterministic, seed: seed, seed2: seed2, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["pooling_ratio"] = pooling_ratio; + keywords["pseudo_random"] = pseudo_random; + keywords["overlapping"] = overlapping; + keywords["deterministic"] = deterministic; + keywords["seed"] = seed; + keywords["seed2"] = seed2; + var _op = tf.OpDefLib._apply_op_helper("FractionalMaxPool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "pooling_ratio", _op.get_attr("pooling_ratio"), "pseudo_random", _op._get_attr_bool("pseudo_random"), "overlapping", _op._get_attr_bool("overlapping"), "deterministic", _op._get_attr_bool("deterministic"), "seed", _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("FractionalMaxPool", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fractional_max_pool_eager_fallback(Tensor value, float[] pooling_ratio, bool pseudo_random, bool overlapping, bool deterministic, int seed, int seed2, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value }; + object[] _attrs = new object[] { "pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random, "overlapping", overlapping, "deterministic", deterministic, "seed", seed, "seed2", seed2, "T", value.dtype }; + var _result = _execute.execute("FractionalMaxPool", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FractionalMaxPool", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes gradient of the FractionalMaxPool function. + /// + /// + /// + /// + /// + /// + /// + /// + /// When set to True, it means when pooling, the values at the boundary + /// of adjacent pooling cells are used by both cells. For example: + /// + /// `index 0 1 2 3 4` + /// + /// `value 20 5 16 3 7` + /// + /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + /// The result would be [20, 16] for fractional max pooling. + /// + /// + /// + public static Tensor fractional_max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor out_backprop, Tensor row_pooling_sequence, Tensor col_pooling_sequence, bool overlapping = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalMaxPoolGrad", name) { args = new object[] { orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence }, attrs = new Dictionary() { ["overlapping"] = overlapping } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fractional_max_pool_grad_eager_fallback(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping: overlapping, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["out_backprop"] = out_backprop; + keywords["row_pooling_sequence"] = row_pooling_sequence; + keywords["col_pooling_sequence"] = col_pooling_sequence; + keywords["overlapping"] = overlapping; + var _op = tf.OpDefLib._apply_op_helper("FractionalMaxPoolGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "overlapping", _op._get_attr_bool("overlapping"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("FractionalMaxPoolGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fractional_max_pool_grad_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor out_backprop, Tensor row_pooling_sequence, Tensor col_pooling_sequence, bool overlapping, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence }; + object[] _attrs = new object[] { "overlapping", overlapping, "T", orig_input.dtype }; + var _result = _execute.execute("FractionalMaxPoolGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FractionalMaxPoolGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Batch normalization. + /// + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// + /// + /// + /// The data format for x and y. Either "NHWC" (default) or "NCHW". + /// + /// + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// + public static Tensor[] fused_batch_norm(Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float epsilon = 0.0001f, float exponential_avg_factor = 1f, string data_format = "NHWC", bool is_training = true, string? 
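// Illustrative note (not part of the generated patch): a hedged sketch of the fused_batch_norm wrapper
// below. x is 4-D (NHWC or NCHW) and scale/offset/mean/variance are 1-D of length C; the op applies the
// standard batch-norm transform scale * (x - mean) / sqrt(variance + epsilon) + offset (that formula is
// the usual definition, not spelled out in this hunk).
//
//     var outs = fused_batch_norm(x, scale, offset, mean, variance,
//         epsilon: 0.001f, data_format: "NHWC", is_training: true);
//     var y = outs[0];   // remaining outputs carry batch statistics / reserve space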
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNorm", name) { args = new object[] { x, scale, offset, mean, variance }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["exponential_avg_factor"] = exponential_avg_factor, ["data_format"] = data_format, ["is_training"] = is_training } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fused_batch_norm_eager_fallback(x, scale, offset, mean, variance, epsilon: epsilon, exponential_avg_factor: exponential_avg_factor, data_format: data_format, is_training: is_training, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["scale"] = scale; + keywords["offset"] = offset; + keywords["mean"] = mean; + keywords["variance"] = variance; + keywords["epsilon"] = epsilon; + keywords["exponential_avg_factor"] = exponential_avg_factor; + keywords["data_format"] = data_format; + keywords["is_training"] = is_training; + var _op = tf.OpDefLib._apply_op_helper("FusedBatchNorm", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "epsilon", _op.get_attr("epsilon"), "exponential_avg_factor", _op.get_attr("exponential_avg_factor"), "data_format", _op.get_attr("data_format"), "is_training", _op._get_attr_bool("is_training") }; + _execute.record_gradient("FusedBatchNorm", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fused_batch_norm_eager_fallback(Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float epsilon, float exponential_avg_factor, string data_format, bool is_training, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, scale, offset, mean, variance }; + object[] _attrs = new object[] { "T", x.dtype, "epsilon", epsilon, "exponential_avg_factor", exponential_avg_factor, "data_format", data_format, "is_training", is_training }; + var _result = _execute.execute("FusedBatchNorm", 5, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedBatchNorm", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Gradient for batch normalization. + /// + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// + /// + /// The data format for y_backprop, x, x_backprop. + /// Either "NHWC" (default) or "NCHW". + /// + /// + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// + public static Tensor[] fused_batch_norm_grad(Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, float epsilon = 0.0001f, string data_format = "NHWC", bool is_training = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormGrad", name) { args = new object[] { y_backprop, x, scale, reserve_space_1, reserve_space_2 }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["data_format"] = data_format, ["is_training"] = is_training } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fused_batch_norm_grad_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon: epsilon, data_format: data_format, is_training: is_training, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["y_backprop"] = y_backprop; + keywords["x"] = x; + keywords["scale"] = scale; + keywords["reserve_space_1"] = reserve_space_1; + keywords["reserve_space_2"] = reserve_space_2; + keywords["epsilon"] = epsilon; + keywords["data_format"] = data_format; + keywords["is_training"] = is_training; + var _op = tf.OpDefLib._apply_op_helper("FusedBatchNormGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "epsilon", _op.get_attr("epsilon"), "data_format", _op.get_attr("data_format"), "is_training", _op._get_attr_bool("is_training") }; + _execute.record_gradient("FusedBatchNormGrad", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fused_batch_norm_grad_eager_fallback(Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, float epsilon, string data_format, bool is_training, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y_backprop, x, scale, reserve_space_1, reserve_space_2 }; + object[] _attrs = new object[] { "T", y_backprop.dtype, "epsilon", epsilon, "data_format", data_format, "is_training", is_training }; + var _result = _execute.execute("FusedBatchNormGrad", 5, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedBatchNormGrad", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Gradient for batch normalization. + /// + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// + /// + /// The data format for y_backprop, x, x_backprop. + /// Either "NHWC" (default) or "NCHW". + /// + /// + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// + public static Tensor[] fused_batch_norm_grad_v2(Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, float epsilon = 0.0001f, string data_format = "NHWC", bool is_training = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormGradV2", name) { args = new object[] { y_backprop, x, scale, reserve_space_1, reserve_space_2 }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["data_format"] = data_format, ["is_training"] = is_training } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fused_batch_norm_grad_v2_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon: epsilon, data_format: data_format, is_training: is_training, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["y_backprop"] = y_backprop; + keywords["x"] = x; + keywords["scale"] = scale; + keywords["reserve_space_1"] = reserve_space_1; + keywords["reserve_space_2"] = reserve_space_2; + keywords["epsilon"] = epsilon; + keywords["data_format"] = data_format; + keywords["is_training"] = is_training; + var _op = tf.OpDefLib._apply_op_helper("FusedBatchNormGradV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"), "epsilon", _op.get_attr("epsilon"), "data_format", _op.get_attr("data_format"), "is_training", _op._get_attr_bool("is_training") }; + _execute.record_gradient("FusedBatchNormGradV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fused_batch_norm_grad_v2_eager_fallback(Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, float epsilon, string data_format, bool is_training, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y_backprop, x, scale, reserve_space_1, reserve_space_2 }; + object[] _attrs = new object[] { "T", y_backprop.dtype, "U", reserve_space_1.dtype, "epsilon", epsilon, "data_format", data_format, "is_training", is_training }; + var _result = _execute.execute("FusedBatchNormGradV2", 5, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedBatchNormGradV2", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Gradient for batch normalization. + /// + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// + /// + /// The data format for y_backprop, x, x_backprop. + /// Either "NHWC" (default) or "NCHW". + /// + /// + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// + public static Tensor[] fused_batch_norm_grad_v3(Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, Tensor reserve_space_3, float epsilon = 0.0001f, string data_format = "NHWC", bool is_training = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormGradV3", name) { args = new object[] { y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3 }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["data_format"] = data_format, ["is_training"] = is_training } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fused_batch_norm_grad_v3_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3, epsilon: epsilon, data_format: data_format, is_training: is_training, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["y_backprop"] = y_backprop; + keywords["x"] = x; + keywords["scale"] = scale; + keywords["reserve_space_1"] = reserve_space_1; + keywords["reserve_space_2"] = reserve_space_2; + keywords["reserve_space_3"] = reserve_space_3; + keywords["epsilon"] = epsilon; + keywords["data_format"] = data_format; + keywords["is_training"] = is_training; + var _op = tf.OpDefLib._apply_op_helper("FusedBatchNormGradV3", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"), "epsilon", _op.get_attr("epsilon"), "data_format", _op.get_attr("data_format"), "is_training", _op._get_attr_bool("is_training") }; + _execute.record_gradient("FusedBatchNormGradV3", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fused_batch_norm_grad_v3_eager_fallback(Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, Tensor reserve_space_3, float epsilon, string data_format, bool is_training, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3 }; + object[] _attrs = new object[] { "T", y_backprop.dtype, "U", reserve_space_1.dtype, "epsilon", epsilon, "data_format", data_format, "is_training", is_training }; + var _result = _execute.execute("FusedBatchNormGradV3", 5, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedBatchNormGradV3", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Batch normalization. + /// + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// + /// + /// + /// The data format for x and y. Either "NHWC" (default) or "NCHW". + /// + /// + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// + public static Tensor[] fused_batch_norm_v2(Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float epsilon = 0.0001f, float exponential_avg_factor = 1f, string data_format = "NHWC", bool is_training = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormV2", name) { args = new object[] { x, scale, offset, mean, variance }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["exponential_avg_factor"] = exponential_avg_factor, ["data_format"] = data_format, ["is_training"] = is_training } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fused_batch_norm_v2_eager_fallback(x, scale, offset, mean, variance, epsilon: epsilon, exponential_avg_factor: exponential_avg_factor, data_format: data_format, is_training: is_training, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["scale"] = scale; + keywords["offset"] = offset; + keywords["mean"] = mean; + keywords["variance"] = variance; + keywords["epsilon"] = epsilon; + keywords["exponential_avg_factor"] = exponential_avg_factor; + keywords["data_format"] = data_format; + keywords["is_training"] = is_training; + var _op = tf.OpDefLib._apply_op_helper("FusedBatchNormV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"), "epsilon", _op.get_attr("epsilon"), "exponential_avg_factor", _op.get_attr("exponential_avg_factor"), "data_format", _op.get_attr("data_format"), "is_training", _op._get_attr_bool("is_training") }; + _execute.record_gradient("FusedBatchNormV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fused_batch_norm_v2_eager_fallback(Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float epsilon, float exponential_avg_factor, string data_format, bool is_training, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, scale, offset, mean, variance }; + object[] _attrs = new object[] { "T", x.dtype, "U", scale.dtype, "epsilon", epsilon, "exponential_avg_factor", exponential_avg_factor, "data_format", data_format, "is_training", is_training }; + var _result = _execute.execute("FusedBatchNormV2", 5, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedBatchNormV2", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Batch normalization. + /// + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// + /// + /// + /// The data format for x and y. Either "NHWC" (default) or "NCHW". + /// + /// + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// + public static Tensor[] fused_batch_norm_v3(Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float epsilon = 0.0001f, float exponential_avg_factor = 1f, string data_format = "NHWC", bool is_training = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormV3", name) { args = new object[] { x, scale, offset, mean, variance }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["exponential_avg_factor"] = exponential_avg_factor, ["data_format"] = data_format, ["is_training"] = is_training } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fused_batch_norm_v3_eager_fallback(x, scale, offset, mean, variance, epsilon: epsilon, exponential_avg_factor: exponential_avg_factor, data_format: data_format, is_training: is_training, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["scale"] = scale; + keywords["offset"] = offset; + keywords["mean"] = mean; + keywords["variance"] = variance; + keywords["epsilon"] = epsilon; + keywords["exponential_avg_factor"] = exponential_avg_factor; + keywords["data_format"] = data_format; + keywords["is_training"] = is_training; + var _op = tf.OpDefLib._apply_op_helper("FusedBatchNormV3", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"), "epsilon", _op.get_attr("epsilon"), "exponential_avg_factor", _op.get_attr("exponential_avg_factor"), "data_format", _op.get_attr("data_format"), "is_training", _op._get_attr_bool("is_training") }; + _execute.record_gradient("FusedBatchNormV3", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fused_batch_norm_v3_eager_fallback(Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float epsilon, float exponential_avg_factor, string data_format, bool is_training, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, scale, offset, mean, variance }; + object[] _attrs = new object[] { "T", x.dtype, "U", scale.dtype, "epsilon", epsilon, "exponential_avg_factor", exponential_avg_factor, "data_format", data_format, "is_training", is_training }; + var _result = _execute.execute("FusedBatchNormV3", 6, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedBatchNormV3", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Performs a padding as a preprocess during a convolution. + /// + /// + /// + /// Similar to FusedResizeAndPadConv2d, this op allows for an optimized + /// implementation where the spatial padding transformation stage is fused with the + /// im2col lookup, but in this case without the bilinear filtering required for + /// resizing. Fusing the padding prevents the need to write out the intermediate + /// results as whole tensors, reducing memory pressure, and we can get some latency + /// gains by merging the transformation calculations. + /// The data_format attribute for Conv2D isn't supported by this op, and 'NHWC' + /// order is used instead. + /// Internally this op uses a single per-graph scratch buffer, which means that it + /// will block if multiple versions are being run in parallel. This is because this + /// operator is primarily an optimization to minimize memory usage. + /// + /// + /// + /// + /// + /// + /// + /// + /// 1-D of length 4. The stride of the sliding window for each dimension + /// of `input`. 
Must be in the same order as the dimension specified with format. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + public static Tensor fused_pad_conv2d(Tensor input, Tensor paddings, Tensor filter, string mode, int[] strides, string padding, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedPadConv2D", name) { args = new object[] { input, paddings, filter }, attrs = new Dictionary() { ["mode"] = mode, ["strides"] = strides, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fused_pad_conv2d_eager_fallback(input, paddings, filter, mode: mode, strides: strides, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["paddings"] = paddings; + keywords["filter"] = filter; + keywords["mode"] = mode; + keywords["strides"] = strides; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("FusedPadConv2D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("FusedPadConv2D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fused_pad_conv2d_eager_fallback(Tensor input, Tensor paddings, Tensor filter, string mode, int[] strides, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, paddings, filter }; + object[] _attrs = new object[] { "T", input.dtype, "mode", mode, "strides", strides, "padding", padding }; + var _result = _execute.execute("FusedPadConv2D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedPadConv2D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs a resize and padding as a preprocess during a convolution. + /// + /// + /// + /// It's often possible to do spatial transformations more efficiently as part of + /// the packing stage of a convolution, so this op allows for an optimized + /// implementation where these stages are fused together. This prevents the need to + /// write out the intermediate results as whole tensors, reducing memory pressure, + /// and we can get some latency gains by merging the transformation calculations. + /// The data_format attribute for Conv2D isn't supported by this op, and defaults to + /// 'NHWC' order. + /// Internally this op uses a single per-graph scratch buffer, which means that it + /// will block if multiple versions are being run in parallel. This is because this + /// operator is primarily an optimization to minimize memory usage. + /// + /// + /// + /// + /// + /// + /// + /// + /// If true, the centers of the 4 corner pixels of the input and output tensors are + /// aligned, preserving the values at the corner pixels. Defaults to false. + /// + /// + /// + /// + /// + /// 1-D of length 4. The stride of the sliding window for each dimension + /// of `input`. Must be in the same order as the dimension specified with format. + /// + /// + /// + /// + /// The type of padding algorithm to use. 
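// Illustrative note (not part of the generated patch): a hedged sketch of the fused_pad_conv2d wrapper
// above. paddings is the usual [4, 2] before/after tensor; the mode values are assumed to follow
// MirrorPad (e.g. "REFLECT"), which is not spelled out in this hunk.
//
//     var paddings = tf.constant(new int[,] { { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } });
//     var y = fused_pad_conv2d(input, paddings, filter,
//         mode: "REFLECT", strides: new[] { 1, 1, 1, 1 }, padding: "VALID");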
+ /// + /// + /// + public static Tensor fused_resize_and_pad_conv2d(Tensor input, Tensor size, Tensor paddings, Tensor filter, string mode, int[] strides, string padding, bool resize_align_corners = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedResizeAndPadConv2D", name) { args = new object[] { input, size, paddings, filter }, attrs = new Dictionary() { ["resize_align_corners"] = resize_align_corners, ["mode"] = mode, ["strides"] = strides, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fused_resize_and_pad_conv2d_eager_fallback(input, size, paddings, filter, resize_align_corners: resize_align_corners, mode: mode, strides: strides, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["size"] = size; + keywords["paddings"] = paddings; + keywords["filter"] = filter; + keywords["resize_align_corners"] = resize_align_corners; + keywords["mode"] = mode; + keywords["strides"] = strides; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("FusedResizeAndPadConv2D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "resize_align_corners", _op._get_attr_bool("resize_align_corners"), "mode", _op.get_attr("mode"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("FusedResizeAndPadConv2D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fused_resize_and_pad_conv2d_eager_fallback(Tensor input, Tensor size, Tensor paddings, Tensor filter, bool resize_align_corners, string mode, int[] strides, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, size, paddings, filter }; + object[] _attrs = new object[] { "T", input.dtype, "resize_align_corners", resize_align_corners, "mode", mode, "strides", strides, "padding", padding }; + var _result = _execute.execute("FusedResizeAndPadConv2D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedResizeAndPadConv2D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Says whether the targets are in the top `K` predictions. + /// + /// + /// + /// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the + /// prediction for the target class is among the top `k` predictions among + /// all predictions for example `i`. Note that the behavior of `InTopK` differs + /// from the `TopK` op in its handling of ties; if multiple classes have the + /// same prediction value and straddle the top-`k` boundary, all of those + /// classes are considered to be in the top `k`. + /// + /// More formally, let + /// + /// \(predictions_i\) be the predictions for all classes for example `i`, + /// \(targets_i\) be the target class for example `i`, + /// \(out_i\) be the output for example `i`, + /// + /// $$out_i = predictions_{i, targets_i} in TopKIncludingTies(predictions_i)$$ + /// + /// + /// + /// + /// + /// + /// Number of top elements to look at for computing precision. + /// + /// + /// + public static Tensor in_top_k(Tensor predictions, Tensor targets, int k = 0, string? 
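// Illustrative note (not part of the generated patch): a small worked example of the in_top_k semantics
// described above. With predictions [[0.1, 0.8, 0.05, 0.05], [0.5, 0.3, 0.1, 0.1]], targets [1, 2] and
// k = 2, the result is [true, false]: class 1 is inside the top-2 of row 0, while class 2 falls below
// the top-2 boundary of row 1.
//
//     var hits = in_top_k(predictions, targets, k: 2);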
name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InTopK", name) { args = new object[] { predictions, targets }, attrs = new Dictionary<string, object>() { ["k"] = k } });
+                return _fast_path_result[0];
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return in_top_k_eager_fallback(predictions, targets, k: k, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["predictions"] = predictions;
+        keywords["targets"] = targets;
+        keywords["k"] = k;
+        var _op = tf.OpDefLib._apply_op_helper("InTopK", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "k", _op._get_attr_int("k"), "T", _op._get_attr_type("T") };
+            _execute.record_gradient("InTopK", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor in_top_k_eager_fallback(Tensor predictions, Tensor targets, int k, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { predictions, targets };
+        object[] _attrs = new object[] { "k", k, "T", targets.dtype };
+        var _result = _execute.execute("InTopK", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("InTopK", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    ///
+    /// Says whether the targets are in the top `K` predictions.
+    ///
+    ///
+    ///
+    /// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
+    /// prediction for the target class is among the top `k` predictions among
+    /// all predictions for example `i`. Note that the behavior of `InTopK` differs
+    /// from the `TopK` op in its handling of ties; if multiple classes have the
+    /// same prediction value and straddle the top-`k` boundary, all of those
+    /// classes are considered to be in the top `k`.
+    ///
+    /// More formally, let
+    ///
+    /// \(predictions_i\) be the predictions for all classes for example `i`,
+    /// \(targets_i\) be the target class for example `i`,
+    /// \(out_i\) be the output for example `i`,
+    ///
+    /// $$out_i = predictions_{i, targets_i} in TopKIncludingTies(predictions_i)$$
+    ///
+    ///
+    ///
+    ///
+    ///
+    public static Tensor in_top_kv2(Tensor predictions, Tensor targets, Tensor k, string?
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InTopKV2", name) { args = new object[] { predictions, targets, k }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return in_top_kv2_eager_fallback(predictions, targets, k, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["predictions"] = predictions; + keywords["targets"] = targets; + keywords["k"] = k; + var _op = tf.OpDefLib._apply_op_helper("InTopKV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("InTopKV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor in_top_kv2_eager_fallback(Tensor predictions, Tensor targets, Tensor k, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { predictions, targets, k }; + object[] _attrs = new object[] { "T", targets.dtype }; + var _result = _execute.execute("InTopKV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InTopKV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Solves a batch of isotonic regression problems. + /// + /// + /// + /// Dtype of output. + /// + /// + public static Tensor[] isotonic_regression(Tensor input, TF_DataType output_dtype = TF_DataType.TF_FLOAT, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsotonicRegression", name) { args = new object[] { input }, attrs = new Dictionary() { ["output_dtype"] = output_dtype } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return isotonic_regression_eager_fallback(input, output_dtype: output_dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["output_dtype"] = output_dtype; + var _op = tf.OpDefLib._apply_op_helper("IsotonicRegression", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "output_dtype", _op._get_attr_type("output_dtype") }; + _execute.record_gradient("IsotonicRegression", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] isotonic_regression_eager_fallback(Tensor input, TF_DataType output_dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "output_dtype", output_dtype }; + var _result = _execute.execute("IsotonicRegression", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IsotonicRegression", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Local Response Normalization. + /// + /// + /// + /// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last + /// dimension), and each vector is normalized independently. Within a given vector, + /// each component is divided by the weighted, squared sum of inputs within + /// `depth_radius`. 
In detail, + /// + /// sqr_sum[a, b, c, d] = + /// sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) + /// output = input / (bias + alpha * sqr_sum) ** beta + /// + /// For details, see [Krizhevsky et al., ImageNet classification with deep + /// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). + /// + /// + /// + /// + /// + /// 0-D. Half-width of the 1-D normalization window. + /// + /// + /// + /// + /// An offset (usually positive to avoid dividing by 0). + /// + /// + /// + /// + /// A scale factor, usually positive. + /// + /// + /// + /// + /// An exponent. + /// + /// + /// + public static Tensor lrn(Tensor input, int depth_radius = 5, float bias = 1f, float alpha = 1f, float beta = 0.5f, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LRN", name) { args = new object[] { input }, attrs = new Dictionary() { ["depth_radius"] = depth_radius, ["bias"] = bias, ["alpha"] = alpha, ["beta"] = beta } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return lrn_eager_fallback(input, depth_radius: depth_radius, bias: bias, alpha: alpha, beta: beta, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["depth_radius"] = depth_radius; + keywords["bias"] = bias; + keywords["alpha"] = alpha; + keywords["beta"] = beta; + var _op = tf.OpDefLib._apply_op_helper("LRN", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "depth_radius", _op._get_attr_int("depth_radius"), "bias", _op.get_attr("bias"), "alpha", _op.get_attr("alpha"), "beta", _op.get_attr("beta"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("LRN", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor lrn_eager_fallback(Tensor input, int depth_radius, float bias, float alpha, float beta, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "depth_radius", depth_radius, "bias", bias, "alpha", alpha, "beta", beta, "T", input.dtype }; + var _result = _execute.execute("LRN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LRN", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes rectified linear: `max(features, features * alpha)`. + /// + /// + /// + /// + public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string? 
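// Editorial usage sketch (not part of the generated patch): applying the LRN formula
// above through the generated wrapper. The constants mirror the classic AlexNet
// settings and are illustrative only; assumes tf.ones, Shape and the gen_nn_ops class
// as used elsewhere in TensorFlow.NET.
//
//   var x = tf.ones(new Shape(1, 8, 8, 16), dtype: TF_DataType.TF_FLOAT);  // NHWC input
//   var y = gen_nn_ops.lrn(x, depth_radius: 2, bias: 1f, alpha: 1e-4f, beta: 0.75f);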
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LeakyRelu", name) { args = new object[] { features }, attrs = new Dictionary() { ["alpha"] = alpha } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return leaky_relu_eager_fallback(features, alpha: alpha, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + keywords["alpha"] = alpha; + var _op = tf.OpDefLib._apply_op_helper("LeakyRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "alpha", _op.get_attr("alpha"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("LeakyRelu", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor leaky_relu_eager_fallback(Tensor features, float alpha, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "alpha", alpha, "T", features.dtype }; + var _result = _execute.execute("LeakyRelu", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LeakyRelu", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes rectified linear gradients for a LeakyRelu operation. + /// + /// + /// + /// + /// + public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LeakyReluGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary() { ["alpha"] = alpha } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return leaky_relu_grad_eager_fallback(gradients, features, alpha: alpha, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["features"] = features; + keywords["alpha"] = alpha; + var _op = tf.OpDefLib._apply_op_helper("LeakyReluGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "alpha", _op.get_attr("alpha"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("LeakyReluGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor leaky_relu_grad_eager_fallback(Tensor gradients, Tensor features, float alpha, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, features }; + object[] _attrs = new object[] { "alpha", alpha, "T", gradients.dtype }; + var _result = _execute.execute("LeakyReluGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LeakyReluGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes log softmax activations. + /// + /// + /// + /// For each batch `i` and class `j` we have + /// + /// logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) + /// + /// + /// + /// + public static Tensor log_softmax(Tensor logits, string? 
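// Editorial usage sketch (not part of the generated patch): LeakyRelu computes
// max(features, features * alpha), so a negative input is simply scaled by alpha.
// Assumes tf.constant and the gen_nn_ops class; values are illustrative only.
//
//   var features = tf.constant(new[] { -5f, 0f, 3f });
//   var y = gen_nn_ops.leaky_relu(features, alpha: 0.1f);  // -> approximately [-0.5, 0, 3]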
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogSoftmax", name) { args = new object[] { logits }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return log_softmax_eager_fallback(logits, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["logits"] = logits; + var _op = tf.OpDefLib._apply_op_helper("LogSoftmax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("LogSoftmax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor log_softmax_eager_fallback(Tensor logits, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { logits }; + object[] _attrs = new object[] { "T", logits.dtype }; + var _result = _execute.execute("LogSoftmax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LogSoftmax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs max pooling on the input. + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool(Tensor input, int[] ksize, int[] strides, string padding, int[] explicit_paddings = null, string data_format = "NHWC", string? 
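// Editorial usage sketch (not part of the generated patch): LogSoftmax is applied
// row-wise over the last dimension, matching the formula in the summary above.
// Assumes tf.constant and the gen_nn_ops class; values are illustrative only.
//
//   var logits   = tf.constant(new float[,] { { 1f, 2f, 3f } });
//   var logProbs = gen_nn_ops.log_softmax(logits);
//   // exp(logProbs) recovers the softmax, which sums to 1 along the last axis.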
name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_eager_fallback(input, ksize: ksize, strides: strides, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format") }; + _execute.record_gradient("MaxPool", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_eager_fallback(Tensor input, int[] ksize, int[] strides, string padding, int[] explicit_paddings, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "ksize", ksize, "strides", strides, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format }; + var _result = _execute.execute("MaxPool", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPool", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs 3D max pooling on the input. + /// + /// + /// + /// + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool3d(Tensor input, int[] ksize, int[] strides, string padding, string data_format = "NDHWC", string? 
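// Editorial usage sketch (not part of the generated patch): 2x2 max pooling with
// stride 2 on an NHWC tensor. ksize and strides are given per dimension of the 4-D
// input, so the batch and channel entries stay at 1. Assumes tf.ones, Shape and the
// gen_nn_ops class; values are illustrative only.
//
//   var images = tf.ones(new Shape(1, 28, 28, 3), dtype: TF_DataType.TF_FLOAT);
//   var pooled = gen_nn_ops.max_pool(images,
//       ksize:   new[] { 1, 2, 2, 1 },
//       strides: new[] { 1, 2, 2, 1 },
//       padding: "VALID");               // -> shape (1, 14, 14, 3)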
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool3D", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool3d_eager_fallback(input, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPool3D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPool3D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool3d_eager_fallback(Tensor input, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", input.dtype }; + var _result = _execute.execute("MaxPool3D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPool3D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients of 3D max pooling function. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool3d_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = "NDHWC", string? 
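// Editorial usage sketch (not part of the generated patch): the 3-D variant takes 5-D
// NDHWC input and length-5 ksize/strides with ksize[0] = ksize[4] = 1, as the op
// requires. Assumes tf.ones, Shape and the gen_nn_ops class; values are illustrative.
//
//   var volume = tf.ones(new Shape(1, 16, 16, 16, 4), dtype: TF_DataType.TF_FLOAT);
//   var pooled = gen_nn_ops.max_pool3d(volume,
//       ksize:   new[] { 1, 2, 2, 2, 1 },
//       strides: new[] { 1, 2, 2, 2, 1 },
//       padding: "SAME");                // -> shape (1, 8, 8, 8, 4)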
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool3DGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool3d_grad_eager_fallback(orig_input, orig_output, grad, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPool3DGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T"), "TInput", _op._get_attr_type("TInput") }; + _execute.record_gradient("MaxPool3DGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool3d_grad_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, grad }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", grad.dtype, "TInput", orig_input.dtype }; + var _result = _execute.execute("MaxPool3DGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPool3DGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes second-order gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool3d_grad_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = "NDHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool3DGradGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool3d_grad_grad_eager_fallback(orig_input, orig_output, grad, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPool3DGradGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPool3DGradGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool3d_grad_grad_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, grad }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", orig_input.dtype }; + var _result = _execute.execute("MaxPool3DGradGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPool3DGradGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, int[] explicit_paddings = null, string data_format = "NHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_grad_eager_fallback(orig_input, orig_output, grad, ksize: ksize, strides: strides, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_grad_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, int[] explicit_paddings, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, grad }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "T", orig_input.dtype }; + var _result = _execute.execute("MaxPoolGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes second-order gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool_grad_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string? 
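// Editorial usage sketch (not part of the generated patch): MaxPoolGrad is normally
// invoked by the gradient machinery rather than by user code. orig_input/orig_output
// are the forward-pass input and output, and grad has the shape of orig_output.
// The tensor names below (images, pooled, upstreamGrad) are placeholders.
//
//   var dx = gen_nn_ops.max_pool_grad(orig_input: images, orig_output: pooled,
//       grad:    upstreamGrad,           // same shape as pooled
//       ksize:   new[] { 1, 2, 2, 1 },
//       strides: new[] { 1, 2, 2, 1 },
//       padding: "VALID");               // dx has the shape of images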
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_grad_grad_eager_fallback(orig_input, orig_output, grad, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolGradGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolGradGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_grad_grad_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, grad }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", orig_input.dtype }; + var _result = _execute.execute("MaxPoolGradGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolGradGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes second-order gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool_grad_grad_v2(Tensor orig_input, Tensor orig_output, Tensor grad, Tensor ksize, Tensor strides, string padding, string data_format = "NHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradGradV2", name) { args = new object[] { orig_input, orig_output, grad, ksize, strides }, attrs = new Dictionary() { ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_grad_grad_v2_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolGradGradV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolGradGradV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_grad_grad_v2_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor grad, Tensor ksize, Tensor strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, grad, ksize, strides }; + object[] _attrs = new object[] { "padding", padding, "data_format", data_format, "T", orig_input.dtype }; + var _result = _execute.execute("MaxPoolGradGradV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolGradGradV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes second-order gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Whether to include batch dimension in flattened index of `argmax`. + /// + /// + /// + public static Tensor max_pool_grad_grad_with_argmax(Tensor input, Tensor grad, Tensor argmax, int[] ksize, int[] strides, string padding, bool include_batch_in_index = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradGradWithArgmax", name) { args = new object[] { input, grad, argmax }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["include_batch_in_index"] = include_batch_in_index } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_grad_grad_with_argmax_eager_fallback(input, grad, argmax, ksize: ksize, strides: strides, padding: padding, include_batch_in_index: include_batch_in_index, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["grad"] = grad; + keywords["argmax"] = argmax; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["include_batch_in_index"] = include_batch_in_index; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolGradGradWithArgmax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "include_batch_in_index", _op._get_attr_bool("include_batch_in_index"), "Targmax", _op._get_attr_type("Targmax"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolGradGradWithArgmax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_grad_grad_with_argmax_eager_fallback(Tensor input, Tensor grad, Tensor argmax, int[] ksize, int[] strides, string padding, bool include_batch_in_index, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, grad, argmax }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "include_batch_in_index", include_batch_in_index, "Targmax", argmax.dtype, "T", input.dtype }; + var _result = _execute.execute("MaxPoolGradGradWithArgmax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolGradGradWithArgmax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool_grad_v2(Tensor orig_input, Tensor orig_output, Tensor grad, Tensor ksize, Tensor strides, string padding, string data_format = "NHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradV2", name) { args = new object[] { orig_input, orig_output, grad, ksize, strides }, attrs = new Dictionary() { ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_grad_v2_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolGradV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolGradV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_grad_v2_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor grad, Tensor ksize, Tensor strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, grad, ksize, strides }; + object[] _attrs = new object[] { "padding", padding, "data_format", data_format, "T", orig_input.dtype }; + var _result = _execute.execute("MaxPoolGradV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolGradV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Whether to include batch dimension in flattened index of `argmax`. + /// + /// + /// + public static Tensor max_pool_grad_with_argmax(Tensor input, Tensor grad, Tensor argmax, int[] ksize, int[] strides, string padding, bool include_batch_in_index = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradWithArgmax", name) { args = new object[] { input, grad, argmax }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["include_batch_in_index"] = include_batch_in_index } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_grad_with_argmax_eager_fallback(input, grad, argmax, ksize: ksize, strides: strides, padding: padding, include_batch_in_index: include_batch_in_index, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["grad"] = grad; + keywords["argmax"] = argmax; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["include_batch_in_index"] = include_batch_in_index; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolGradWithArgmax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "include_batch_in_index", _op._get_attr_bool("include_batch_in_index"), "Targmax", _op._get_attr_type("Targmax"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolGradWithArgmax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_grad_with_argmax_eager_fallback(Tensor input, Tensor grad, Tensor argmax, int[] ksize, int[] strides, string padding, bool include_batch_in_index, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, grad, argmax }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "include_batch_in_index", include_batch_in_index, "Targmax", argmax.dtype, "T", input.dtype }; + var _result = _execute.execute("MaxPoolGradWithArgmax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolGradWithArgmax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs max pooling on the input. + /// + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool_v2(Tensor input, Tensor ksize, Tensor strides, string padding, string data_format = "NHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolV2", name) { args = new object[] { input, ksize, strides }, attrs = new Dictionary() { ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_v2_eager_fallback(input, ksize, strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format") }; + _execute.record_gradient("MaxPoolV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_v2_eager_fallback(Tensor input, Tensor ksize, Tensor strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, ksize, strides }; + object[] _attrs = new object[] { "T", input.dtype, "padding", padding, "data_format", data_format }; + var _result = _execute.execute("MaxPoolV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs max pooling on the input and outputs both max values and indices. + /// + /// + /// + /// The indices in `argmax` are flattened, so that a maximum value at position + /// `[b, y, x, c]` becomes flattened index: + /// `(y * width + x) * channels + c` if `include_batch_in_index` is False; + /// `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + /// + /// The indices returned are always in `[0, height) x [0, width)` before flattening, + /// even if padding is involved and the mathematically correct answer is outside + /// (either negative or too large). This is a bug, but fixing it is difficult to do + /// in a safe backwards compatible way, especially due to flattening. + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Whether to include batch dimension in flattened index of `argmax`. + /// + /// + /// + public static Tensor[] max_pool_with_argmax(Tensor input, int[] ksize, int[] strides, string padding, TF_DataType Targmax = TF_DataType.TF_INT64, bool include_batch_in_index = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolWithArgmax", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["Targmax"] = Targmax, ["padding"] = padding, ["include_batch_in_index"] = include_batch_in_index } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return max_pool_with_argmax_eager_fallback(input, ksize: ksize, strides: strides, Targmax: Targmax, padding: padding, include_batch_in_index: include_batch_in_index, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["Targmax"] = Targmax; + keywords["padding"] = padding; + keywords["include_batch_in_index"] = include_batch_in_index; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolWithArgmax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "Targmax", _op._get_attr_type("Targmax"), "padding", _op.get_attr("padding"), "include_batch_in_index", _op._get_attr_bool("include_batch_in_index"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolWithArgmax", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] max_pool_with_argmax_eager_fallback(Tensor input, int[] ksize, int[] strides, TF_DataType Targmax, string padding, bool include_batch_in_index, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "Targmax", Targmax, "padding", padding, "include_batch_in_index", include_batch_in_index, "T", input.dtype }; + var _result = _execute.execute("MaxPoolWithArgmax", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolWithArgmax", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Finds values of the `n`-th order statistic for the last dimension. + /// + /// + /// + /// If the input is a vector (rank-1), finds the entries which is the nth-smallest + /// value in the vector and outputs their values as scalar tensor. + /// + /// For matrices (resp. higher rank input), computes the entries which is the + /// nth-smallest value in each row (resp. vector along the last dimension). Thus, + /// + /// values.shape = input.shape[:-1] + /// + /// + /// + /// + /// + /// + /// When set to True, find the nth-largest value in the vector and vice + /// versa. + /// + /// + /// + public static Tensor nth_element(Tensor input, Tensor n, bool reverse = false, string? 
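// Editorial usage sketch (not part of the generated patch): MaxPoolWithArgmax returns
// two tensors, the pooled values and the flattened argmax indices described in the
// summary above ((y * width + x) * channels + c when include_batch_in_index is false).
// Assumes tf.ones, Shape and the gen_nn_ops class; values are illustrative only.
//
//   var results = gen_nn_ops.max_pool_with_argmax(
//       tf.ones(new Shape(1, 4, 4, 1), dtype: TF_DataType.TF_FLOAT),
//       ksize:   new[] { 1, 2, 2, 1 },
//       strides: new[] { 1, 2, 2, 1 },
//       padding: "VALID");
//   var pooledValues = results[0];       // max values
//   var argmaxIdx    = results[1];       // int64 flattened indices by default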
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "NthElement", name) { args = new object[] { input, n }, attrs = new Dictionary() { ["reverse"] = reverse } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return nth_element_eager_fallback(input, n, reverse: reverse, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["n"] = n; + keywords["reverse"] = reverse; + var _op = tf.OpDefLib._apply_op_helper("NthElement", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "reverse", _op._get_attr_bool("reverse"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("NthElement", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor nth_element_eager_fallback(Tensor input, Tensor n, bool reverse, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, n }; + object[] _attrs = new object[] { "reverse", reverse, "T", input.dtype }; + var _result = _execute.execute("NthElement", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("NthElement", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Produces the average pool of the input tensor for quantized types. + /// + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// The length must be 4 to match the number of dimensions of the input. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// tensor. The length must be 4 to match the number of dimensions of the input. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + public static Tensor[] quantized_avg_pool(Tensor input, Tensor min_input, Tensor max_input, int[] ksize, int[] strides, string padding, string? 
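// Editorial usage sketch (not part of the generated patch): NthElement picks the n-th
// smallest entry along the last dimension, or the n-th largest when reverse is true.
// Assumes tf.constant and the gen_nn_ops class; values are illustrative only.
//
//   var input = tf.constant(new float[,] { { 7f, 1f, 5f, 3f } });
//   var n     = tf.constant(1);                               // 0-based order statistic
//   var secondSmallest = gen_nn_ops.nth_element(input, n);                  // -> [3]
//   var secondLargest  = gen_nn_ops.nth_element(input, n, reverse: true);   // -> [5]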
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedAvgPool", name) { args = new object[] { input, min_input, max_input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_avg_pool_eager_fallback(input, min_input, max_input, ksize: ksize, strides: strides, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("QuantizedAvgPool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("QuantizedAvgPool", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_avg_pool_eager_fallback(Tensor input, Tensor min_input, Tensor max_input, int[] ksize, int[] strides, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, min_input, max_input }; + object[] _attrs = new object[] { "T", input.dtype, "ksize", ksize, "strides", strides, "padding", padding }; + var _result = _execute.execute("QuantizedAvgPool", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedAvgPool", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Quantized Batch normalization. + /// + /// + /// + /// This op is deprecated and will be removed in the future. Prefer + /// `tf.nn.batch_normalization`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number to avoid dividing by 0. + /// + /// + /// + /// + /// A bool indicating whether the resulted tensor + /// needs to be multiplied with gamma. + /// + /// + /// + public static Tensor[] quantized_batch_norm_with_global_normalization(Tensor t, Tensor t_min, Tensor t_max, Tensor m, Tensor m_min, Tensor m_max, Tensor v, Tensor v_min, Tensor v_max, Tensor beta, Tensor beta_min, Tensor beta_max, Tensor gamma, Tensor gamma_min, Tensor gamma_max, TF_DataType out_type, float variance_epsilon, bool scale_after_normalization, string? 
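// Editorial usage sketch (not part of the generated patch): QuantizedAvgPool consumes
// a quantized tensor plus its float range and returns three tensors: the pooled values
// and the output min/max. qImages, minRange and maxRange below are placeholders for
// values produced by an earlier quantize op.
//
//   var outs = gen_nn_ops.quantized_avg_pool(qImages, minRange, maxRange,
//       ksize:   new[] { 1, 2, 2, 1 },
//       strides: new[] { 1, 2, 2, 1 },
//       padding: "VALID");
//   var pooledQ = outs[0]; var outMin = outs[1]; var outMax = outs[2];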
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedBatchNormWithGlobalNormalization", name) { args = new object[] { t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max }, attrs = new Dictionary() { ["out_type"] = out_type, ["variance_epsilon"] = variance_epsilon, ["scale_after_normalization"] = scale_after_normalization } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_batch_norm_with_global_normalization_eager_fallback(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, out_type: out_type, variance_epsilon: variance_epsilon, scale_after_normalization: scale_after_normalization, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["t"] = t; + keywords["t_min"] = t_min; + keywords["t_max"] = t_max; + keywords["m"] = m; + keywords["m_min"] = m_min; + keywords["m_max"] = m_max; + keywords["v"] = v; + keywords["v_min"] = v_min; + keywords["v_max"] = v_max; + keywords["beta"] = beta; + keywords["beta_min"] = beta_min; + keywords["beta_max"] = beta_max; + keywords["gamma"] = gamma; + keywords["gamma_min"] = gamma_min; + keywords["gamma_max"] = gamma_max; + keywords["out_type"] = out_type; + keywords["variance_epsilon"] = variance_epsilon; + keywords["scale_after_normalization"] = scale_after_normalization; + var _op = tf.OpDefLib._apply_op_helper("QuantizedBatchNormWithGlobalNormalization", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "out_type", _op._get_attr_type("out_type"), "variance_epsilon", _op.get_attr("variance_epsilon"), "scale_after_normalization", _op._get_attr_bool("scale_after_normalization") }; + _execute.record_gradient("QuantizedBatchNormWithGlobalNormalization", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_batch_norm_with_global_normalization_eager_fallback(Tensor t, Tensor t_min, Tensor t_max, Tensor m, Tensor m_min, Tensor m_max, Tensor v, Tensor v_min, Tensor v_max, Tensor beta, Tensor beta_min, Tensor beta_max, Tensor gamma, Tensor gamma_min, Tensor gamma_max, TF_DataType out_type, float variance_epsilon, bool scale_after_normalization, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max }; + object[] _attrs = new object[] { "Tinput", t.dtype, "out_type", out_type, "variance_epsilon", variance_epsilon, "scale_after_normalization", scale_after_normalization }; + var _result = _execute.execute("QuantizedBatchNormWithGlobalNormalization", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedBatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Adds Tensor 'bias' to Tensor 'input' for Quantized types. + /// + /// + /// + /// Broadcasts the values of bias on dimensions 0..N-2 of 'input'. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_bias_add(Tensor input, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_bias, Tensor max_bias, TF_DataType out_type, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedBiasAdd", name) { args = new object[] { input, bias, min_input, max_input, min_bias, max_bias }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_bias_add_eager_fallback(input, bias, min_input, max_input, min_bias, max_bias, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_bias"] = min_bias; + keywords["max_bias"] = max_bias; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("QuantizedBiasAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("QuantizedBiasAdd", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_bias_add_eager_fallback(Tensor input, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_bias, Tensor max_bias, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, bias, min_input, max_input, min_bias, max_bias }; + object[] _attrs = new object[] { "T1", input.dtype, "T2", bias.dtype, "out_type", out_type }; + var _result = _execute.execute("QuantizedBiasAdd", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedBiasAdd", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes a 2D convolution given quantized 4D input and filter tensors. + /// + /// + /// + /// The inputs are quantized tensors where the lowest value represents the real + /// number of the associated minimum, and the highest represents the maximum. + /// This means that you can only interpret the quantized output in the same way, by + /// taking the returned minimum and maximum values into account. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// tensor. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. The dimension order is determined by the + /// value of `data_format`, see above for details. Dilations in the batch and + /// depth dimensions must be 1. + /// + /// + /// + public static Tensor[] quantized_conv2d(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, string? 
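// Editorial usage sketch (not part of the generated patch): QuantizedBiasAdd adds a
// 1-D quantized bias along the last dimension of a quantized input and reports the
// value range of the result. All tensors below are placeholders for outputs of earlier
// quantize ops; out_type selects the quantized result dtype.
//
//   var outs = gen_nn_ops.quantized_bias_add(qInput, qBias,
//       minInput, maxInput, minBias, maxBias,
//       out_type: TF_DataType.TF_QINT32);
//   var qSum = outs[0]; var minOut = outs[1]; var maxOut = outs[2];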
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2D", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("QuantizedConv2D", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_eager_fallback(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations }; + var _result = _execute.execute("QuantizedConv2D", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2D", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_and_relu(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, int[] padding_list = null, string? 
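// Editorial usage sketch (not part of the generated patch): QuantizedConv2D returns
// the convolution result plus the min/max needed to interpret it, as explained in the
// summary above. The quantized input/filter tensors and their ranges are placeholders;
// strides and padding follow the regular Conv2D layout.
//
//   var outs = gen_nn_ops.quantized_conv2d(qInput, qFilter,
//       minInput, maxInput, minFilter, maxFilter,
//       strides: new[] { 1, 1, 1, 1 },
//       padding: "SAME");                // out_type defaults to TF_QINT32
//   var qConv = outs[0]; var convMin = outs[1]; var convMax = outs[2];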
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DAndRelu", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_and_relu_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DAndRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DAndRelu", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_and_relu_eager_fallback(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DAndRelu", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DAndRelu", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_and_relu_and_requantize(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QUINT8, int[] dilations = null, int[] padding_list = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DAndReluAndRequantize", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_and_relu_and_requantize_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DAndReluAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DAndReluAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_and_relu_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DAndReluAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DAndReluAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_and_requantize(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, int[] strides, string padding, 
TF_DataType out_type = TF_DataType.TF_QINT8, int[] dilations = null, int[] padding_list = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DAndRequantize", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_and_requantize_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes QuantizedConv2D per channel. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The quantized type of output tensor that needs to be converted. + /// + /// + /// + /// list of stride values. + /// + /// + /// + /// list of dilation values. 
+ /// + /// + public static Tensor[] quantized_conv2d_per_channel(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DPerChannel", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_per_channel_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DPerChannel", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("QuantizedConv2DPerChannel", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_per_channel_eager_fallback(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations }; + var _result = _execute.execute("QuantizedConv2DPerChannel", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DPerChannel", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_with_bias(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, int[] padding_list = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBias", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBias", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBias", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBias", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DWithBias", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_with_bias_and_relu(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, int[] padding_list = null, string? 
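// The *WithBias variants take the same arguments plus a `bias` tensor that is added to the
// convolution result (before the relu in the *AndRelu forms). A hypothetical call mirrors the
// quantized_conv2d sketch above:
//   var biased = quantized_conv2d_with_bias(q_input, q_filter, bias, min_input, max_input,
//                                           min_filter, max_filter, new[] { 1, 1, 1, 1 }, "SAME");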
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasAndRelu", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBiasAndRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBiasAndRelu", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_and_relu_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBiasAndRelu", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_with_bias_and_relu_and_requantize(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QUINT8, int[] dilations = null, int[] padding_list = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBiasAndReluAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "Tbias", _op._get_attr_type("Tbias"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBiasAndReluAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_and_relu_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "Tbias", bias.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBiasAndReluAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] 
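// Sketch of the fused requantize form, again with hypothetical tensors: the extra
// min_freezed_output / max_freezed_output scalars fix the output range up front, so the op can
// emit a narrow TF_QUINT8 result instead of a wide TF_QINT32 accumulator, while still returning
// its min/max range tensors:
//   var r = quantized_conv2d_with_bias_and_relu_and_requantize(q_input, q_filter, bias,
//               min_input, max_input, min_filter, max_filter, min_out, max_out,
//               new[] { 1, 1, 1, 1 }, "SAME");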
quantized_conv2d_with_bias_and_requantize(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT8, int[] dilations = null, int[] padding_list = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBiasAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "Tbias", _op._get_attr_type("Tbias"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBiasAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "Tbias", bias.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBiasAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + 
_execute.record_gradient("QuantizedConv2DWithBiasAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, Tensor summand, Tensor min_summand, Tensor max_summand, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QUINT8, int[] dilations = null, int[] padding_list = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["summand"] = summand; + keywords["min_summand"] = min_summand; + keywords["max_summand"] = max_summand; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "Tbias", _op._get_attr_type("Tbias"), "Tsummand", _op._get_attr_type("Tsummand"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, Tensor 
summand, Tensor min_summand, Tensor max_summand, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "Tbias", bias.dtype, "Tsummand", summand.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_with_bias_sum_and_relu(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor summand, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, int[] padding_list = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasSumAndRelu", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, summand }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_sum_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, summand, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["summand"] = summand; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBiasSumAndRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBiasSumAndRelu", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_sum_and_relu_eager_fallback(Tensor 
input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor summand, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter, summand }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBiasSumAndRelu", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DWithBiasSumAndRelu", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_with_bias_sum_and_relu_and_requantize(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, Tensor summand, Tensor min_summand, Tensor max_summand, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QUINT8, int[] dilations = null, int[] padding_list = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasSumAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["summand"] = summand; + keywords["min_summand"] = min_summand; + keywords["max_summand"] = max_summand; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBiasSumAndReluAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", 
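// Per the op names, the *SumAndRelu variants additionally take a `summand` tensor (and, in the
// requantized form, its min/max range) that is added to the convolution output before the relu,
// i.e. the fused residual-add pattern. The calling convention otherwise matches the sketches above.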
_op._get_attr_type("Tfilter"), "Tbias", _op._get_attr_type("Tbias"), "Tsummand", _op._get_attr_type("Tsummand"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBiasSumAndReluAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, Tensor summand, Tensor min_summand, Tensor max_summand, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "Tbias", bias.dtype, "Tsummand", summand.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBiasSumAndReluAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DWithBiasSumAndReluAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes quantized depthwise Conv2D. + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of the output. + /// + /// + /// List of stride values. + /// + /// + /// + /// List of dilation values. + /// + /// + public static Tensor[] quantized_depthwise_conv2d(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2D", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_depthwise_conv2d_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("QuantizedDepthwiseConv2D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("QuantizedDepthwiseConv2D", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_depthwise_conv2d_eager_fallback(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations }; + var _result = _execute.execute("QuantizedDepthwiseConv2D", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedDepthwiseConv2D", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes quantized depthwise Conv2D with Bias. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of the output. + /// + /// + /// List of stride values. + /// + /// + /// + /// List of dilation values. + /// + /// + public static Tensor[] quantized_depthwise_conv2d_with_bias(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, string? 
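// quantized_depthwise_conv2d follows the same calling convention as quantized_conv2d above; only
// the underlying kernel (depthwise rather than full convolution) differs. A hypothetical call:
//   var dw = quantized_depthwise_conv2d(q_input, q_filter, min_input, max_input,
//                                       min_filter, max_filter, new[] { 1, 1, 1, 1 }, "SAME");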
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2DWithBias", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_depthwise_conv2d_with_bias_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("QuantizedDepthwiseConv2DWithBias", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("QuantizedDepthwiseConv2DWithBias", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_depthwise_conv2d_with_bias_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations }; + var _result = _execute.execute("QuantizedDepthwiseConv2DWithBias", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedDepthwiseConv2DWithBias", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes quantized depthwise Conv2D with Bias and Relu. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of the output. + /// + /// + /// List of stride values. + /// + /// + /// + /// List of dilation values. + /// + /// + /// + public static Tensor[] quantized_depthwise_conv2d_with_bias_and_relu(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, int[] padding_list = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2DWithBiasAndRelu", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_depthwise_conv2d_with_bias_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedDepthwiseConv2DWithBiasAndRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedDepthwiseConv2DWithBiasAndRelu", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_depthwise_conv2d_with_bias_and_relu_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedDepthwiseConv2DWithBiasAndRelu", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedDepthwiseConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes quantized depthwise Conv2D with Bias, Relu and Requantize. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of the output. + /// + /// + /// List of stride values. + /// + /// + /// + /// List of dilation values. 
+ /// + /// + /// + public static Tensor[] quantized_depthwise_conv2d_with_bias_and_relu_and_requantize(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QUINT8, int[] dilations = null, int[] padding_list = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_depthwise_conv2d_with_bias_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "Tbias", _op._get_attr_type("Tbias"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_depthwise_conv2d_with_bias_and_relu_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "Tbias", bias.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = 
_execute.execute("QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// ~~%~~Performs a quantized matrix multiplication of `a` by the matrix `b` with bias~~%~~add.~~%~~ + /// + /// + /// + /// The inputs must be two-dimensional matrices and 1D bias vector. And the inner + /// dimension of `a` (after being transposed if `transpose_a` is non-zero) must + /// match the outer dimension of `b` (after being transposed if `transposed_b` is + /// non-zero). Then do broadcast add operation with bias values on the matrix + /// multiplication result. The bias size must match inner dimension of `b`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If true, `a` is transposed before multiplication. + /// + /// + /// If true, `b` is transposed before multiplication. + /// + /// + /// + /// Input data quantization mode. Either MIN_FIRST(default) or SCALED. + /// + /// + /// + public static Tensor[] quantized_mat_mul_with_bias(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType Toutput = TF_DataType.TF_QINT32, bool transpose_a = false, bool transpose_b = false, string input_quant_mode = "MIN_FIRST", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBias", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_mat_mul_with_bias_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, Toutput: Toutput, transpose_a: transpose_a, transpose_b: transpose_b, input_quant_mode: input_quant_mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (input_quant_mode is null) + { + input_quant_mode = "MIN_FIRST"; + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["bias"] = bias; + keywords["min_a"] = min_a; + keywords["max_a"] = max_a; + keywords["min_b"] = min_b; + keywords["max_b"] = max_b; + keywords["Toutput"] = Toutput; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["input_quant_mode"] = input_quant_mode; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMatMulWithBias", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Tbias", _op._get_attr_type("Tbias"), "Toutput", _op._get_attr_type("Toutput"), "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "input_quant_mode", _op.get_attr("input_quant_mode") }; + _execute.record_gradient("QuantizedMatMulWithBias", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_mat_mul_with_bias_eager_fallback(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType Toutput, bool transpose_a, bool transpose_b, string input_quant_mode, string name, Context ctx) + { + Tensor[] _inputs_flat = 
new Tensor[] { a, b, bias, min_a, max_a, min_b, max_b }; + object[] _attrs = new object[] { "T1", a.dtype, "T2", b.dtype, "Tbias", bias.dtype, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "input_quant_mode", input_quant_mode }; + var _result = _execute.execute("QuantizedMatMulWithBias", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMatMulWithBias", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor quantized_mat_mul_with_bias_and_dequantize(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType Toutput, bool transpose_a = false, bool transpose_b = false, string input_quant_mode = "MIN_FIRST", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndDequantize", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return quantized_mat_mul_with_bias_and_dequantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput: Toutput, transpose_a: transpose_a, transpose_b: transpose_b, input_quant_mode: input_quant_mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (input_quant_mode is null) + { + input_quant_mode = "MIN_FIRST"; + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["bias"] = bias; + keywords["min_a"] = min_a; + keywords["max_a"] = max_a; + keywords["min_b"] = min_b; + keywords["max_b"] = max_b; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["Toutput"] = Toutput; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["input_quant_mode"] = input_quant_mode; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMatMulWithBiasAndDequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Tbias", _op._get_attr_type("Tbias"), "Toutput", _op._get_attr_type("Toutput"), "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "input_quant_mode", _op.get_attr("input_quant_mode") }; + _execute.record_gradient("QuantizedMatMulWithBiasAndDequantize", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor quantized_mat_mul_with_bias_and_dequantize_eager_fallback(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType Toutput, bool transpose_a, bool transpose_b, string input_quant_mode, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "T1", a.dtype, "T2", 
b.dtype, "Tbias", bias.dtype, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "input_quant_mode", input_quant_mode }; + var _result = _execute.execute("QuantizedMatMulWithBiasAndDequantize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMatMulWithBiasAndDequantize", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// ~~%~~Perform a quantized matrix multiplication of `a` by the matrix `b` with bias~~%~~add and relu fusion.~~%~~ + /// + /// + /// + /// The inputs must be two-dimensional matrices and 1D bias vector. And the inner + /// dimension of `a` (after being transposed if `transpose_a` is non-zero) must + /// match the outer dimension of `b` (after being transposed if `transposed_b` is + /// non-zero). Then do broadcast add operation with bias values on the matrix + /// multiplication result. The bias size must match inner dimension of `b`. Then do + /// relu activation to get non-negative result. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If true, `a` is transposed before multiplication. + /// + /// + /// If true, `b` is transposed before multiplication. + /// + /// + /// + /// Input data quantization mode. Either MIN_FIRST(default) or SCALED. + /// + /// + /// + public static Tensor[] quantized_mat_mul_with_bias_and_relu(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType Toutput = TF_DataType.TF_QINT32, bool transpose_a = false, bool transpose_b = false, string input_quant_mode = "MIN_FIRST", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndRelu", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_mat_mul_with_bias_and_relu_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, Toutput: Toutput, transpose_a: transpose_a, transpose_b: transpose_b, input_quant_mode: input_quant_mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (input_quant_mode is null) + { + input_quant_mode = "MIN_FIRST"; + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["bias"] = bias; + keywords["min_a"] = min_a; + keywords["max_a"] = max_a; + keywords["min_b"] = min_b; + keywords["max_b"] = max_b; + keywords["Toutput"] = Toutput; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["input_quant_mode"] = input_quant_mode; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMatMulWithBiasAndRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Toutput", _op._get_attr_type("Toutput"), "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "input_quant_mode", _op.get_attr("input_quant_mode") }; + _execute.record_gradient("QuantizedMatMulWithBiasAndRelu", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_mat_mul_with_bias_and_relu_eager_fallback(Tensor a, 
Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType Toutput, bool transpose_a, bool transpose_b, string input_quant_mode, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b, bias, min_a, max_a, min_b, max_b }; + object[] _attrs = new object[] { "T1", a.dtype, "T2", b.dtype, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "input_quant_mode", input_quant_mode }; + var _result = _execute.execute("QuantizedMatMulWithBiasAndRelu", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMatMulWithBiasAndRelu", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// ~~%~~Perform a quantized matrix multiplication of `a` by the matrix `b` with bias~~%~~add and relu and requantize fusion.~~%~~ + /// + /// + /// + /// The inputs must be two-dimensional matrices and 1D bias vector. And the inner + /// dimension of `a` (after being transposed if `transpose_a` is non-zero) must + /// match the outer dimension of `b` (after being transposed if `transposed_b` is + /// non-zero). Then do broadcast add operation with bias values on the matrix + /// multiplication result. The bias size must match inner dimension of `b`. Then do + /// relu activation to get non-negative result. Then do requantize operation to get + /// final uint8 result. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If true, `a` is transposed before multiplication. + /// + /// + /// If true, `b` is transposed before multiplication. + /// + /// + /// + /// Input data quantization mode. Either MIN_FIRST(default) or SCALED. + /// + /// + /// + public static Tensor[] quantized_mat_mul_with_bias_and_relu_and_requantize(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType Toutput = TF_DataType.TF_QUINT8, bool transpose_a = false, bool transpose_b = false, string input_quant_mode = "MIN_FIRST", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndReluAndRequantize", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_mat_mul_with_bias_and_relu_and_requantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput: Toutput, transpose_a: transpose_a, transpose_b: transpose_b, input_quant_mode: input_quant_mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (input_quant_mode is null) + { + input_quant_mode = "MIN_FIRST"; + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["bias"] = bias; + keywords["min_a"] = min_a; + keywords["max_a"] = max_a; + keywords["min_b"] = min_b; + keywords["max_b"] = max_b; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["Toutput"] = Toutput; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["input_quant_mode"] = input_quant_mode; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMatMulWithBiasAndReluAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Tbias", _op._get_attr_type("Tbias"), "Toutput", _op._get_attr_type("Toutput"), "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "input_quant_mode", _op.get_attr("input_quant_mode") }; + _execute.record_gradient("QuantizedMatMulWithBiasAndReluAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_mat_mul_with_bias_and_relu_and_requantize_eager_fallback(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType Toutput, bool transpose_a, bool transpose_b, string input_quant_mode, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "T1", a.dtype, "T2", b.dtype, "Tbias", bias.dtype, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "input_quant_mode", input_quant_mode }; + var _result = _execute.execute("QuantizedMatMulWithBiasAndReluAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMatMulWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_mat_mul_with_bias_and_requantize(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType Toutput = TF_DataType.TF_QUINT8, bool transpose_a = false, bool transpose_b = false, string input_quant_mode = "MIN_FIRST", string? 
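// Hedged sketch for the relu + requantize fusion defined above (again assuming the usual
// gen_nn_ops class): compared with quantized_mat_mul_with_bias it additionally takes the
// frozen output range and, with the default Toutput of TF_QUINT8, yields a uint8 result.
//
//   var r = gen_nn_ops.quantized_mat_mul_with_bias_and_relu_and_requantize(
//       a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output);
//   // r[0] is the requantized quint8 output; r[1] and r[2] are its min/max range.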
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndRequantize", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_mat_mul_with_bias_and_requantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput: Toutput, transpose_a: transpose_a, transpose_b: transpose_b, input_quant_mode: input_quant_mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (input_quant_mode is null) + { + input_quant_mode = "MIN_FIRST"; + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["bias"] = bias; + keywords["min_a"] = min_a; + keywords["max_a"] = max_a; + keywords["min_b"] = min_b; + keywords["max_b"] = max_b; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["Toutput"] = Toutput; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["input_quant_mode"] = input_quant_mode; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMatMulWithBiasAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Tbias", _op._get_attr_type("Tbias"), "Toutput", _op._get_attr_type("Toutput"), "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "input_quant_mode", _op.get_attr("input_quant_mode") }; + _execute.record_gradient("QuantizedMatMulWithBiasAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_mat_mul_with_bias_and_requantize_eager_fallback(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType Toutput, bool transpose_a, bool transpose_b, string input_quant_mode, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "T1", a.dtype, "T2", b.dtype, "Tbias", bias.dtype, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "input_quant_mode", input_quant_mode }; + var _result = _execute.execute("QuantizedMatMulWithBiasAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMatMulWithBiasAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Produces the max pool of the input tensor for quantized types. + /// + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// The length must be 4 to match the number of dimensions of the input. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// tensor. The length must be 4 to match the number of dimensions of the input. + /// + /// + /// + /// + /// The type of padding algorithm to use. 
+ /// + /// + /// + public static Tensor[] quantized_max_pool(Tensor input, Tensor min_input, Tensor max_input, int[] ksize, int[] strides, string padding, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMaxPool", name) { args = new object[] { input, min_input, max_input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_max_pool_eager_fallback(input, min_input, max_input, ksize: ksize, strides: strides, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMaxPool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("QuantizedMaxPool", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_max_pool_eager_fallback(Tensor input, Tensor min_input, Tensor max_input, int[] ksize, int[] strides, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, min_input, max_input }; + object[] _attrs = new object[] { "T", input.dtype, "ksize", ksize, "strides", strides, "padding", padding }; + var _result = _execute.execute("QuantizedMaxPool", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMaxPool", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes Quantized Rectified Linear: `max(features, 0)` + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_relu(Tensor features, Tensor min_features, Tensor max_features, TF_DataType out_type = TF_DataType.TF_QUINT8, string? 
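// Illustrative QuantizedMaxPool call (editorial sketch, assuming the enclosing gen_nn_ops
// class): ksize and strides must both have length 4 to match the rank of the input.
//
//   var pooled = gen_nn_ops.quantized_max_pool(input, min_input, max_input,
//       ksize: new[] { 1, 2, 2, 1 }, strides: new[] { 1, 2, 2, 1 }, padding: "VALID");
//   // pooled[0] is the pooled tensor; pooled[1] and pooled[2] carry the output min/max range.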
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedRelu", name) { args = new object[] { features, min_features, max_features }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_relu_eager_fallback(features, min_features, max_features, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + keywords["min_features"] = min_features; + keywords["max_features"] = max_features; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("QuantizedRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("QuantizedRelu", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_relu_eager_fallback(Tensor features, Tensor min_features, Tensor max_features, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features, min_features, max_features }; + object[] _attrs = new object[] { "Tinput", features.dtype, "out_type", out_type }; + var _result = _execute.execute("QuantizedRelu", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedRelu", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_relu6(Tensor features, Tensor min_features, Tensor max_features, TF_DataType out_type = TF_DataType.TF_QUINT8, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedRelu6", name) { args = new object[] { features, min_features, max_features }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_relu6_eager_fallback(features, min_features, max_features, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + keywords["min_features"] = min_features; + keywords["max_features"] = max_features; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("QuantizedRelu6", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("QuantizedRelu6", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_relu6_eager_fallback(Tensor features, Tensor min_features, Tensor max_features, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features, min_features, max_features }; + object[] _attrs = new object[] { "Tinput", features.dtype, "out_type", out_type }; + var _result = _execute.execute("QuantizedRelu6", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedRelu6", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_relu_x(Tensor features, Tensor max_value, Tensor min_features, Tensor max_features, TF_DataType out_type = TF_DataType.TF_QUINT8, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedReluX", name) { args = new object[] { features, max_value, min_features, max_features }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_relu_x_eager_fallback(features, max_value, min_features, max_features, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + keywords["max_value"] = max_value; + keywords["min_features"] = min_features; + keywords["max_features"] = max_features; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("QuantizedReluX", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("QuantizedReluX", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_relu_x_eager_fallback(Tensor features, Tensor max_value, Tensor min_features, Tensor max_features, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features, max_value, min_features, max_features }; + object[] _attrs = new object[] { "Tinput", features.dtype, "out_type", out_type }; + var _result = _execute.execute("QuantizedReluX", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedReluX", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes rectified linear: `max(features, 0)`. + /// + /// + /// + /// See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) + /// Example usage: + /// >>> tf.nn.relu([-2., 0., 3.]).numpy() + /// array([0., 0., 3.], dtype=float32) + /// + /// + /// + /// + public static Tensor relu(Tensor features, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Relu", name) { args = new object[] { features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return relu_eager_fallback(features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("Relu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Relu", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor relu_eager_fallback(Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("Relu", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Relu", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes rectified linear 6: `min(max(features, 0), 6)`. + /// + /// + /// + public static Tensor relu6(Tensor features, string? 
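// A C# counterpart of the Python example quoted in the relu summary above (hedged sketch;
// ops.convert_to_tensor is used elsewhere in this patch, only the sample values are new):
//
//   var y = gen_nn_ops.relu(ops.convert_to_tensor(new float[] { -2f, 0f, 3f }));
//   // y evaluates to [0, 0, 3]: negative inputs clamp to zero, non-negative pass through.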
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Relu6", name) { args = new object[] { features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return relu6_eager_fallback(features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("Relu6", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Relu6", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor relu6_eager_fallback(Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("Relu6", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Relu6", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes rectified linear gradients for a Relu operation. + /// + /// + /// + /// + public static Tensor relu_grad(Tensor gradients, Tensor features, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReluGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return relu_grad_eager_fallback(gradients, features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("ReluGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("ReluGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor relu_grad_eager_fallback(Tensor gradients, Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, features }; + object[] _attrs = new object[] { "T", gradients.dtype }; + var _result = _execute.execute("ReluGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReluGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` + /// + /// + /// + /// if < 0, `scale * features` otherwise. + /// + /// To be used together with + /// `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. + /// For correct dropout, use `tf.contrib.nn.alpha_dropout`. + /// + /// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) + /// + /// + /// + /// + public static Tensor selu(Tensor features, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Selu", name) { args = new object[] { features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return selu_eager_fallback(features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("Selu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Selu", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor selu_eager_fallback(Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("Selu", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Selu", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients for the scaled exponential linear (Selu) operation. + /// + /// + /// + /// + public static Tensor selu_grad(Tensor gradients, Tensor outputs, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SeluGrad", name) { args = new object[] { gradients, outputs }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return selu_grad_eager_fallback(gradients, outputs, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["outputs"] = outputs; + var _op = tf.OpDefLib._apply_op_helper("SeluGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SeluGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor selu_grad_eager_fallback(Tensor gradients, Tensor outputs, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, outputs }; + object[] _attrs = new object[] { "T", gradients.dtype }; + var _result = _execute.execute("SeluGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SeluGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes softmax activations. + /// + /// + /// + /// For each batch `i` and class `j` we have + /// + /// $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ + /// + /// + /// + /// + public static Tensor softmax(Tensor logits, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Softmax", name) { args = new object[] { logits }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return softmax_eager_fallback(logits, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["logits"] = logits; + var _op = tf.OpDefLib._apply_op_helper("Softmax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Softmax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor softmax_eager_fallback(Tensor logits, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { logits }; + object[] _attrs = new object[] { "T", logits.dtype }; + var _result = _execute.execute("Softmax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Softmax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes softmax cross entropy cost and gradients to backpropagate. + /// + /// + /// + /// Inputs are the logits, not probabilities. + /// + /// + /// + /// + /// + public static Tensor[] softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SoftmaxCrossEntropyWithLogits", name) { args = new object[] { features, labels }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return softmax_cross_entropy_with_logits_eager_fallback(features, labels, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + keywords["labels"] = labels; + var _op = tf.OpDefLib._apply_op_helper("SoftmaxCrossEntropyWithLogits", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SoftmaxCrossEntropyWithLogits", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] softmax_cross_entropy_with_logits_eager_fallback(Tensor features, Tensor labels, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features, labels }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("SoftmaxCrossEntropyWithLogits", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + public static Tensor softplus(Tensor features, string? 
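// Worked example for the softmax formula in the summary above (editorial, values rounded):
// for logits [1, 2, 3], exp gives [2.718, 7.389, 20.086] with sum 30.193, so
// softmax ~= [0.090, 0.245, 0.665]. A hedged call sketch, assuming the gen_nn_ops class:
//
//   var probs = gen_nn_ops.softmax(logits);                      // single Tensor result
//   var lossAndGrad = gen_nn_ops.softmax_cross_entropy_with_logits(features, labels);
//   // lossAndGrad[0] is the per-example loss, lossAndGrad[1] the backpropagated gradient.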
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Softplus", name) { args = new object[] { features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return softplus_eager_fallback(features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("Softplus", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Softplus", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor softplus_eager_fallback(Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("Softplus", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Softplus", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes softplus gradients for a softplus operation. + /// + /// + /// + /// + public static Tensor softplus_grad(Tensor gradients, Tensor features, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SoftplusGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return softplus_grad_eager_fallback(gradients, features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("SoftplusGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SoftplusGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor softplus_grad_eager_fallback(Tensor gradients, Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, features }; + object[] _attrs = new object[] { "T", gradients.dtype }; + var _result = _execute.execute("SoftplusGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SoftplusGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes softsign: `features / (abs(features) + 1)`. + /// + /// + /// + public static Tensor softsign(Tensor features, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Softsign", name) { args = new object[] { features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return softsign_eager_fallback(features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("Softsign", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Softsign", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor softsign_eager_fallback(Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("Softsign", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Softsign", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes softsign gradients for a softsign operation. + /// + /// + /// + /// + public static Tensor softsign_grad(Tensor gradients, Tensor features, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SoftsignGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return softsign_grad_eager_fallback(gradients, features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("SoftsignGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SoftsignGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor softsign_grad_eager_fallback(Tensor gradients, Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, features }; + object[] _attrs = new object[] { "T", gradients.dtype }; + var _result = _execute.execute("SoftsignGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SoftsignGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes softmax cross entropy cost and gradients to backpropagate. + /// + /// + /// + /// Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + /// a matrix of label probabilities, but rather a single label per row + /// of features. This label is considered to have probability 1.0 for the + /// given row. + /// + /// Inputs are the logits, not probabilities. + /// + /// + /// + /// + /// + public static Tensor[] sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string? 
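// Hedged sketch for SparseSoftmaxCrossEntropyWithLogits (assuming the gen_nn_ops class):
// unlike softmax_cross_entropy_with_logits, labels is a rank-1 tensor of class indices,
// one per row of features, rather than a full probability matrix.
//
//   var res = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels);
//   // res[0] is the per-example loss; res[1] is the gradient with respect to features.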
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSoftmaxCrossEntropyWithLogits", name) { args = new object[] { features, labels }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return sparse_softmax_cross_entropy_with_logits_eager_fallback(features, labels, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + keywords["labels"] = labels; + var _op = tf.OpDefLib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tlabels", _op._get_attr_type("Tlabels") }; + _execute.record_gradient("SparseSoftmaxCrossEntropyWithLogits", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] sparse_softmax_cross_entropy_with_logits_eager_fallback(Tensor features, Tensor labels, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features, labels }; + object[] _attrs = new object[] { "T", features.dtype, "Tlabels", labels.dtype }; + var _result = _execute.execute("SparseSoftmaxCrossEntropyWithLogits", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Finds values and indices of the `k` largest elements for the last dimension. + /// + /// + /// + /// If the input is a vector (rank-1), finds the `k` largest entries in the vector + /// and outputs their values and indices as vectors. Thus `values[j]` is the + /// `j`-th largest entry in `input`, and its index is `indices[j]`. + /// + /// For matrices (resp. higher rank input), computes the top `k` entries in each + /// row (resp. vector along the last dimension). Thus, + /// + /// values.shape = indices.shape = input.shape[:-1] + [k] + /// + /// If two elements are equal, the lower-index element appears first. + /// + /// If `k` varies dynamically, use `TopKV2` below. + /// + /// + /// + /// + /// + /// Number of top elements to look for along the last dimension (along each + /// row for matrices). + /// + /// + /// + /// + /// If true the resulting `k` elements will be sorted by the values in + /// descending order. + /// + /// + /// + public static Tensor[] top_k(Tensor input, int k = 0, bool sorted = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TopK", name) { args = new object[] { input }, attrs = new Dictionary() { ["k"] = k, ["sorted"] = sorted } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return top_k_eager_fallback(input, k: k, sorted: sorted, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["k"] = k; + keywords["sorted"] = sorted; + var _op = tf.OpDefLib._apply_op_helper("TopK", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "k", _op._get_attr_int("k"), "sorted", _op._get_attr_bool("sorted"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("TopK", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] top_k_eager_fallback(Tensor input, int k, bool sorted, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "k", k, "sorted", sorted, "T", input.dtype }; + var _result = _execute.execute("TopK", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TopK", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Finds values and indices of the `k` largest elements for the last dimension. + /// + /// + /// + /// If the input is a vector (rank-1), finds the `k` largest entries in the vector + /// and outputs their values and indices as vectors. Thus `values[j]` is the + /// `j`-th largest entry in `input`, and its index is `indices[j]`. + /// + /// For matrices (resp. higher rank input), computes the top `k` entries in each + /// row (resp. vector along the last dimension). Thus, + /// + /// values.shape = indices.shape = input.shape[:-1] + [k] + /// + /// If two elements are equal, the lower-index element appears first. + /// + /// + /// + /// + /// + /// + /// If true the resulting `k` elements will be sorted by the values in + /// descending order. + /// + /// + /// + public static Tensor[] top_kv2(Tensor input, Tensor k, bool sorted = true, string? 
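// Worked TopK example (editorial): for input [1, 3, 2] and k = 2 the op returns
// values [3, 2] and indices [1, 2], and values.shape == input.shape[:-1] + [k].
// A hedged call sketch, assuming the gen_nn_ops class; top_kv2 differs only in taking
// k as a Tensor instead of a compile-time int:
//
//   var topk = gen_nn_ops.top_k(input, k: 2, sorted: true);
//   var topkv2 = gen_nn_ops.top_kv2(input, ops.convert_to_tensor(2), sorted: true);
//   // element 0 of each result holds the values, element 1 the matching indices.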
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TopKV2", name) { args = new object[] { input, k }, attrs = new Dictionary() { ["sorted"] = sorted } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return top_kv2_eager_fallback(input, k, sorted: sorted, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["k"] = k; + keywords["sorted"] = sorted; + var _op = tf.OpDefLib._apply_op_helper("TopKV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "sorted", _op._get_attr_bool("sorted"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("TopKV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] top_kv2_eager_fallback(Tensor input, Tensor k, bool sorted, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, k }; + object[] _attrs = new object[] { "sorted", sorted, "T", input.dtype }; + var _result = _execute.execute("TopKV2", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TopKV2", _inputs_flat, _attrs, _result); + } + return _result; + } +} diff --git a/src/TensorFlowNET.Core/Operations/gen_ops.cs b/src/TensorFlowNET.Core/Operations/gen_ops.cs index fe67c2b84..5fa4c97dd 100644 --- a/src/TensorFlowNET.Core/Operations/gen_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_ops.cs @@ -10055,7 +10055,7 @@ public static Tensor ensure_shape(Tensor input, Shape shape, string name = "Ensu { try { - var _result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("EnsureShape", name, input, shape)); + var _result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "EnsureShape", name, input, shape)); return _result[0]; } catch (Exception) @@ -10076,7 +10076,7 @@ public static Tensor ensure_shape(Tensor input, Shape shape, string name = "Ensu dict["input"] = input; dict["shape"] = shape; var op = tf.OpDefLib._apply_op_helper("EnsureShape", name: name, keywords: dict); - if (execute.must_record_gradient()) + if (_execute.must_record_gradient()) { throw new NotImplementedException(); } @@ -10086,9 +10086,9 @@ public static Tensor ensure_shape(Tensor input, Shape shape, string name = "Ensu public static Tensor ensure_shape_eager_fallback(Tensor input, Shape shape, string name, Context ctx) { object[] attrs = new object[4] { "shape", shape, "T", input.dtype.as_datatype_enum() }; - var _result = execute.executes("EnsureShape", 1, new Tensor[] { input }, + var _result = _execute.execute("EnsureShape", 1, new Tensor[] { input }, attrs, ctx, name); - if (execute.must_record_gradient()) + if (_execute.must_record_gradient()) { throw new NotImplementedException(); } @@ -17194,7 +17194,7 @@ public static Operation merge_v2_checkpoints(Tensor[] checkpoint_prefixes, Tenso var ctx = tf.Context; if (ctx.executing_eagerly()) { - var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("MergeV2Checkpoints", name, + var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "MergeV2Checkpoints", name, checkpoint_prefixes, destination_prefix, "delete_old_dirs", delete_old_dirs, "allow_missing_files", allow_missing_files)); result = null; return null; @@ -24297,7 +24297,7 @@ public static Tensor regex_full_match(Tensor input, 
Tensor pattern, string name var ctx = tf.Context; if (ctx.executing_eagerly()) { - var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("RegexFullMatch", name, input, pattern)); + var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "RegexFullMatch", name, input, pattern)); return result[0]; } var dict = new Dictionary(); @@ -27201,7 +27201,7 @@ public static Tensor[] restore_v2(Tensor prefix, string[] tensor_names, string[] Dictionary attrs = new(); attrs["dtypes"] = dtypes; var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo( - "RestoreV2", name, prefix, tensor_names, shape_and_slices + tf.Context, "RestoreV2", name, prefix, tensor_names, shape_and_slices ) { attrs = attrs }); return result; @@ -27236,9 +27236,9 @@ public static Tensor[] restore_v2_eager_fallback(Tensor prefix, string[] tensor_ var shape_and_slices_tensor = ops.convert_to_tensor(shape_and_slices, TF_DataType.TF_STRING); object[] attrs = new object[] { "dtypes", dtypes }; Tensor[] inputs_flat = new Tensor[] { prefix, tensor_names_tensor, shape_and_slices_tensor }; - var result = execute.quick_execute("RestoreV2", dtypes.Length, inputs_flat, attrs, ctx, name); + var result = _execute.quick_execute("RestoreV2", dtypes.Length, inputs_flat, attrs, ctx, name); - if (execute.must_record_gradient()) + if (_execute.must_record_gradient()) { // TODO(Rinne); record the gradient } @@ -29829,7 +29829,7 @@ public static Tensor sharded_filename(Tensor basename, Tensor shard, Tensor num_ var ctx = tf.Context; if (ctx.executing_eagerly()) { - var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("ShardedFilename", name, basename, shard, num_shards)); + var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "ShardedFilename", name, basename, shard, num_shards)); return result[0]; } var dict = new Dictionary(); @@ -34759,7 +34759,7 @@ public static Tensor string_join(Tensor[] inputs, string separator = null, strin var ctx = tf.Context; if (ctx.executing_eagerly()) { - var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("StringJoin", name, inputs, "separator", separator)); + var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "StringJoin", name, inputs, "separator", separator)); return result[0]; } var dict = new Dictionary(); diff --git a/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs b/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs index 330903252..c4e8f8c41 100644 --- a/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs @@ -25,7 +25,7 @@ public static Operation assign_sub_variable_op(Tensor resource, Tensor value, st if (tf.Context.executing_eagerly()) { tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo( - "AssignSubVariableOp", name, resource, value)); + tf.Context, "AssignSubVariableOp", name, resource, value)); return null; } @@ -44,7 +44,7 @@ public static Operation assign_add_variable_op(Tensor resource, Tensor value, st { if (tf.Context.executing_eagerly()) { - tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("AssignAddVariableOp", name, + tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "AssignAddVariableOp", name, resource, value)); return null; @@ -59,7 +59,7 @@ public static Operation assign_variable_op(Tensor resource, Tensor value, string { if (tf.Context.executing_eagerly()) { - tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("AssignVariableOp", name, + 
tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "AssignVariableOp", name, resource, value)); return null; @@ -74,7 +74,7 @@ public static Tensor var_is_initialized_op(Tensor resource, string name = null) { if (tf.Context.executing_eagerly()) { - var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("VarIsInitializedOp", name, + var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "VarIsInitializedOp", name, resource)); return results[0]; @@ -99,7 +99,7 @@ public static Tensor var_handle_op(TF_DataType dtype, Shape shape, { if (tf.Context.executing_eagerly()) { - var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("VarHandleOp", name) + var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "VarHandleOp", name) { attrs = ConvertToDict(new { diff --git a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs index e0bc037d2..9d52f5161 100644 --- a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs +++ b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs @@ -177,11 +177,11 @@ internal static Tensor _random_flip(Tensor image, int flip_index, int seed, stri if (shape.ndim == 3 || shape.ndim == Unknown) { Tensor uniform_random = random_ops.random_uniform(new int[] { }, 0f, 1.0f, seed: seed); - var mirror_cond = gen_math_ops.less(uniform_random, .5); + var mirror_cond = gen_math_ops.less(uniform_random, ops.convert_to_tensor(.5)); var result = control_flow_ops.cond( pred: mirror_cond, - true_fn: () => gen_array_ops.reverse(image, new { flip_index }), + true_fn: () => gen_array_ops.reverse(image, ops.convert_to_tensor(new int[] { flip_index })), false_fn: () => image, name: scope ); @@ -197,7 +197,7 @@ internal static Tensor _random_flip(Tensor image, int flip_index, int seed, stri var flips = math_ops.round( array_ops.reshape(uniform_random, shape: array_ops.constant(value: new object[] { batch_size[0], 1, 1, 1 }))); flips = math_ops.cast(flips, image.dtype); - var flipped_input = gen_array_ops.reverse(image, new int[] { flip_index + 1 }); + var flipped_input = gen_array_ops.reverse(image, ops.convert_to_tensor(new int[] { flip_index + 1 })); return flips * flipped_input + (1 - flips) * image; } else @@ -222,11 +222,11 @@ internal static Tensor _flip(Tensor image, int flip_index, string scope_name) Shape shape = image.shape; if (shape.ndim == 3 || shape.ndim == Unknown) { - return fix_image_flip_shape(image, gen_array_ops.reverse(image, new { flip_index })); + return fix_image_flip_shape(image, gen_array_ops.reverse(image, ops.convert_to_tensor(new int[] { flip_index }))); } else if (shape.ndim == 4) { - return gen_array_ops.reverse(image, new[] { flip_index + 1 }); + return gen_array_ops.reverse(image, ops.convert_to_tensor(new[] { flip_index + 1 })); } else { @@ -268,15 +268,15 @@ internal static Tensor _rot90_3D(Tensor image, int k, string name_scope) { Tensor _rot90() { - return array_ops.transpose(gen_array_ops.reverse(image, new[] { 1, 0, 2 }), new int[] { 1 }); + return array_ops.transpose(gen_array_ops.reverse(image, ops.convert_to_tensor(new[] { 1, 0, 2 })), new int[] { 1 }); }; Tensor _rot180() { - return gen_array_ops.reverse(image, new[] { 0, 1 }); + return gen_array_ops.reverse(image, ops.convert_to_tensor(new[] { 0, 1 })); }; Tensor _rot270() { - return gen_array_ops.reverse(array_ops.transpose(image, new[] { 1, 0, 2 }), new[] { 1 }); + return gen_array_ops.reverse(array_ops.transpose(image, new[] { 1, 0, 2 }), 
ops.convert_to_tensor(new[] { 1 })); }; var cases = new[] {math_ops.equal(k, 1), _rot90(), @@ -1389,7 +1389,7 @@ internal static (Tensor, Tensor, Operation[]) _verify_compatible_image_shapes(Te Operation[] checks = new Operation[] { }; checks.append( control_flow_ops.Assert( - gen_math_ops.greater_equal(array_ops.size(shape1_tensor), 3), new[] { shape1, shape2 }, + gen_math_ops.greater_equal(array_ops.size(shape1_tensor), ops.convert_to_tensor(3)), new[] { shape1, shape2 }, summarize: 10)); checks.append( control_flow_ops.Assert( @@ -1762,8 +1762,8 @@ internal static (Tensor, Tensor, Tensor, Tensor) _cross_suppression(Tensor boxes { var batch_size = array_ops.shape(boxes)[0]; var new_slice = array_ops.slice( - boxes, new object[] { 0, inner_idx * tile_size, 0 }, - new object[] { batch_size, tile_size, 4 }); + boxes, new Tensor[] { ops.convert_to_tensor(0), ops.convert_to_tensor(inner_idx * tile_size), ops.convert_to_tensor(0) }, + new Tensor[] { ops.convert_to_tensor(batch_size), ops.convert_to_tensor(tile_size), ops.convert_to_tensor(4) }); var iou = _bbox_overlap(new_slice, box_slice); var box_slice_after_suppression = array_ops.expand_dims( math_ops.cast(math_ops.reduce_all(iou < iou_threshold, new(1)), @@ -1816,8 +1816,8 @@ internal static (Tensor, float, Tensor, int) _suppression_loop_body(Tensor boxes (Tensor, Tensor, Tensor, Tensor) cross_suppression_func(Tensor boxes, Tensor box_slice, Tensor iou_threshold, Tensor inner_idx, int tile_size) => _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size); - var box_slice = array_ops.slice(boxes, new[] { 0, idx * tile_size, 0 }, - new[] { batch_size, tile_size, 4 }); + var box_slice = array_ops.slice(boxes, new Tensor[]{ ops.convert_to_tensor(0), ops.convert_to_tensor(idx * tile_size), ops.convert_to_tensor(0) }, + new Tensor[] { ops.convert_to_tensor(batch_size), ops.convert_to_tensor(tile_size), ops.convert_to_tensor(4) }); var iou = _bbox_overlap(box_slice, box_slice); var mask = array_ops.expand_dims( diff --git a/src/TensorFlowNET.Core/Operations/io_ops.cs b/src/TensorFlowNET.Core/Operations/io_ops.cs index 16e1bac47..0b77689d5 100644 --- a/src/TensorFlowNET.Core/Operations/io_ops.cs +++ b/src/TensorFlowNET.Core/Operations/io_ops.cs @@ -31,7 +31,7 @@ public Operation save_v2(Tensor prefix, string[] tensor_names, string[] shape_an try { var result = tf.Runner.TFE_FastPathExecute( - new FastPathOpExecInfo("SaveV2", name, new object[] { prefix, tensor_names, shape_and_slices, tensors })); + new FastPathOpExecInfo(tf.Context, "SaveV2", name, new object[] { prefix, tensor_names, shape_and_slices, tensors })); result = null; return null; } @@ -48,14 +48,14 @@ public Operation save_v2(Tensor prefix, string[] tensor_names, string[] shape_an public Operation save_v2_eager_fallback(Tensor prefix, string[] tensor_names, string[] shape_and_slices, Tensor[] tensors, string name, Context ctx) { DataType[] attr_dtypes; - (attr_dtypes, tensors) = execute.onvert_to_mixed_eager_tensors(tensors, ctx); + (attr_dtypes, tensors) = _execute.onvert_to_mixed_eager_tensors(tensors, ctx); prefix = ops.convert_to_tensor(prefix, TF_DataType.TF_STRING); var tensor_names_tensor = ops.convert_to_tensor(tensor_names, TF_DataType.TF_STRING); var shape_and_slices_tensor = ops.convert_to_tensor(shape_and_slices, TF_DataType.TF_STRING); var inputs_flat = tensors.Concat(new Tensor[] { prefix, tensor_names_tensor, shape_and_slices_tensor }).ToArray(); var attrs = new object[] { "dtypes", attr_dtypes }; - var result = execute.quick_execute("SaveV2", 0, 
inputs_flat, attrs, ctx, name); + var result = _execute.quick_execute("SaveV2", 0, inputs_flat, attrs, ctx, name); result = null; return null; } diff --git a/src/TensorFlowNET.Core/Operations/math_ops.cs b/src/TensorFlowNET.Core/Operations/math_ops.cs index f7b428bb4..5ded448ac 100644 --- a/src/TensorFlowNET.Core/Operations/math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/math_ops.cs @@ -21,6 +21,7 @@ limitations under the License. using Tensorflow.Framework; using static Tensorflow.Binding; using Tensorflow.Operations; +using System.Runtime.CompilerServices; namespace Tensorflow { @@ -39,18 +40,18 @@ public static Tensor abs(Tensor x, string name = null) { return gen_ops.complex_abs(x, Tout: x.dtype.real_dtype(), name: name); } - return gen_math_ops._abs(x, name: name); + return gen_math_ops.abs(x, name: name); }); } public static Tensor add(Tx x, Ty y, string name = null) - => gen_math_ops.add(x, y, name); + => gen_math_ops.add(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); public static Tensor add_v2(Tensor x, Tensor y, string name = null) => tf.Context.ExecuteOp("AddV2", name, new ExecuteOpArgs(x, y)); public static Tensor add_v2(Tx x, Ty y, string name = null) - => gen_math_ops.add_v2(x, y, name); + => gen_math_ops.add_v2(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); /// /// Adds all input tensors element-wise. @@ -254,9 +255,9 @@ public static Tensor einsum(string equation, Tensors inputs, string name = null) } public static Tensor greater_equal(Tx x, Ty y, string name = null) - => gen_math_ops.greater_equal(x, y, name: name); + => gen_math_ops.greater_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public static Tensor equal(Tx x, Ty y, string name = null) - => gen_math_ops.equal(x, y, name: name); + => gen_math_ops.equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); /// /// Computes the Gauss error function of `x` element-wise. 
@@ -274,13 +275,13 @@ public static Tensor multiply(Tensor x, Tensor y, string name = null) => tf.Context.ExecuteOp("Mul", name, new ExecuteOpArgs(x, y)); public static Tensor multiply(Tx x, Ty y, string name = null) - => gen_math_ops.mul(x, y, name: name); + => gen_math_ops.mul(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public static Tensor not_equal(Tx x, Ty y, string name = null) - => gen_math_ops.not_equal(x, y, name: name); + => gen_math_ops.not_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public static Tensor mul_no_nan(Tx x, Ty y, string name = null) - => gen_math_ops.mul_no_nan(x, y, name: name); + => gen_math_ops.mul_no_nan(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public static Tensor scalar_mul(Tscale scale, Tx x, string name = null) => tf.Context.ExecuteOp("Mul", name, new ExecuteOpArgs(scale, x)); @@ -396,7 +397,7 @@ public static Tensor sigmoid(T x, string name = null) }); public static Tensor sign(T x, string name = null) - => gen_math_ops.sign(x, name: name); + => gen_math_ops.sign(ops.convert_to_tensor(x), name: name); public static Tensor sin(Tensor x, string name = null) => tf.Context.ExecuteOp("Sin", name, new ExecuteOpArgs(x)); @@ -421,7 +422,7 @@ public static Tensor square(Tensor x, string name = null) public static Tensor subtract(Tx x, Ty y, string name = null) { - return gen_math_ops.sub(x, y, name); + return gen_math_ops.sub(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); } public static Tensor log(Tensor x, string name = null) @@ -455,8 +456,8 @@ public static Tensor linspace(Tensor start, Tensor stop, int num = 50, string na var axis_tensor = array_ops.where_v2(constant_op.constant(axis >= 0), x: axis, y: ndims + axis); // The purpose is to avoid having negative values when repeating. - var num_fill = gen_math_ops.maximum(num_int_tensor - 2, 0); - var n_steps = gen_math_ops.maximum(num_int_tensor - 1, 1); + var num_fill = gen_math_ops.maximum(num_int_tensor - 2, ops.convert_to_tensor(0)); + var n_steps = gen_math_ops.maximum(num_int_tensor - 1, ops.convert_to_tensor(1)); var delta = (expanded_stop - expanded_start) / cast(n_steps, expanded_stop.dtype); var range_end = array_ops.where_v2(num_int_tensor >= 0, n_steps, -1); @@ -503,7 +504,7 @@ public static Tensor reduced_shape(Tensor input_shape, Tensor axes) var axes_shape = array_ops.shape(axes); var rng = math_ops.range(input_rank); var a1 = new Tensor[] { rng, axes }; - var fill = gen_array_ops.fill(axes_shape, 1); + var fill = gen_array_ops.fill(axes_shape, ops.convert_to_tensor(1)); var a2 = new Tensor[] { input_shape, fill }; return gen_data_flow_ops.dynamic_stitch(a1, a2); @@ -528,7 +529,7 @@ public static Tensor reciprocal(Tensor x, string name = null) /// public static Tensor reduce_all(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null) { - var all = gen_math_ops._all(input_tensor, + var all = gen_math_ops.all(input_tensor, _ReductionDims(input_tensor, axis), keepdims, name: name); @@ -581,23 +582,23 @@ public static Tensor reduce_logsumexp(Tensor input_tensor, Axis axis = null, boo public static Tensor reduce_any(Tensor input_tensor, Axis axis = null, bool keepdims = false, string name = null) { var r = _ReductionDims(input_tensor, axis); - var max = (axis != null) ? gen_math_ops._any(input_tensor, axis, keepdims, name) : - gen_math_ops._any(input_tensor, r, keepdims, name); + var max = (axis != null) ? 
gen_math_ops.any(input_tensor, axis, keepdims, name) : + gen_math_ops.any(input_tensor, r, keepdims, name); return _may_reduce_to_scalar(keepdims, axis, max); } public static Tensor reduce_max(Tensor input_tensor, Axis axis = null, bool keepdims = false, string name = null) { var r = _ReductionDims(input_tensor, axis); - var max = (axis != null) ? gen_math_ops._max(input_tensor, axis, keepdims, name) : - gen_math_ops._max(input_tensor, r, keepdims, name); + var max = (axis != null) ? gen_math_ops.max(input_tensor, axis, keepdims, name) : + gen_math_ops.max(input_tensor, r, keepdims, name); return _may_reduce_to_scalar(keepdims, axis, max); } public static Tensor reduce_min(Tensor input_tensor, Axis axis = null, bool keepdims = false, string name = null) { var r = _ReductionDims(input_tensor, axis); - var min = gen_math_ops._min(input_tensor, r, keepdims, name); + var min = gen_math_ops.min(input_tensor, r, keepdims, name); return _may_reduce_to_scalar(keepdims, axis, min); } @@ -643,7 +644,7 @@ public static Tensor __case__(Tensor x, TF_DataType dtype, string name = null) public static Tensor reduce_sum(Tensor input_tensor, Tensor axis = null, bool keepdims = false, string name = null) { var r = _ReductionDims(input_tensor, axis); - var m = gen_math_ops._sum(input_tensor, r, keep_dims: keepdims, name: name); + var m = gen_math_ops.sum(input_tensor, r, keep_dims: keepdims, name: name); return _may_reduce_to_scalar(keepdims, axis, m); } @@ -752,10 +753,10 @@ public static Tensor floordiv(Tensor x, Tensor y, string name = null) } public static Tensor minimum(Tx x, Ty y, string name = null) - => gen_math_ops.minimum(x, y, name: name); + => gen_math_ops.minimum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public static Tensor maximum(Tx x, Ty y, string name = null) - => gen_math_ops.maximum(x, y, name: name); + => gen_math_ops.maximum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); /// /// Multiplies matrix `a` by matrix `b`, producing `a` * `b`. 
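[Editor's note on the math_ops.cs hunks above] Taken together, these hunks apply one mechanical rule: the regenerated gen_math_ops bindings accept Tensor arguments only, so scalar and generic arguments are wrapped in ops.convert_to_tensor at the call site, and the underscore-prefixed reductions (_all, _any, _max, _min, _sum) become all, any, max, min and sum. Below is a minimal caller-side sketch of that pattern; it assumes only the public surface visible in these hunks, and the values and the wrapper class name are invented for illustration rather than taken from the patch.

using System;
using Tensorflow;                 // ops, gen_math_ops, math_ops, Tensor
using static Tensorflow.Binding;  // tf

class GenMathOpsMigrationSketch
{
    static void Main()
    {
        var x = tf.constant(new[] { 1.0f, -2.0f, 3.0f });

        // Before this patch the generated binding resolved scalars through generic
        // overloads, e.g. gen_math_ops.maximum(x, 0.0f). The regenerated binding
        // takes tensors only, so the scalar is converted first.
        var clipped = gen_math_ops.maximum(x, ops.convert_to_tensor(0.0f));

        // High-level wrappers keep their signatures; internally they now call
        // gen_math_ops.sum(...) instead of gen_math_ops._sum(...).
        var total = math_ops.reduce_sum(x);

        Console.WriteLine(clipped.numpy());
        Console.WriteLine(total.numpy());
    }
}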
diff --git a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs index d24e81ef4..ca4b885f7 100644 --- a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs +++ b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs @@ -236,7 +236,7 @@ public static Tensor zero_fraction(Tensor value, string name = null) Tensor size = array_ops.size(value, out_type: dtypes.int64); Tensor zero_fraction_float32 = null; - size = gen_math_ops.less_equal(size, dtypes.int32.max()); + size = gen_math_ops.less_equal(size, ops.convert_to_tensor(dtypes.int32.max())); Tensor num_nonzero = control_flow_ops.cond( size, () => math_ops.cast(_count_nonzero(value, dtype: dtypes.int32), TF_DataType.TF_INT64), diff --git a/src/TensorFlowNET.Core/Operations/nn_ops.cs b/src/TensorFlowNET.Core/Operations/nn_ops.cs index b8d5103c4..00d7d316b 100644 --- a/src/TensorFlowNET.Core/Operations/nn_ops.cs +++ b/src/TensorFlowNET.Core/Operations/nn_ops.cs @@ -55,7 +55,7 @@ public static Tensor bias_add(Tensor value, return tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => { name = scope; - return gen_nn_ops.bias_add(value, bias, data_format: data_format, name: name); + return gen_nn_ops.bias_add(value, ops.convert_to_tensor(bias), data_format: data_format, name: name); }); } @@ -117,7 +117,7 @@ public static Tensor in_top_k(Tensor predictions, Tensor targets, int k, string { return tf_with(ops.name_scope(name, "in_top_k"), delegate { - return gen_nn_ops.in_top_kv2(predictions, targets, k, name: name); + return gen_nn_ops.in_top_kv2(predictions, targets, ops.convert_to_tensor(k), name: name); }); } @@ -222,8 +222,8 @@ public static Tensor sparse_softmax_cross_entropy_with_logits(Tensor labels = nu // Check if no reshapes are required. if (logits.shape.ndim == 2) { - var (cost, _) = gen_nn_ops.sparse_softmax_cross_entropy_with_logits( - precise_logits, labels, name: name); + var cost = gen_nn_ops.sparse_softmax_cross_entropy_with_logits( + precise_logits, labels, name: name)[0]; if (logits.dtype == dtypes.float16) return math_ops.cast(cost, dtypes.float32); else @@ -261,7 +261,8 @@ public static Tensor softmax_cross_entropy_with_logits_v2_helper(Tensor labels, // The second output tensor contains the gradients. We use it in // _CrossEntropyGrad() in nn_grad but not here. - var (cost, unused_backprop) = gen_nn_ops.softmax_cross_entropy_with_logits(precise_logits, labels, name: name); + var entropy = gen_nn_ops.softmax_cross_entropy_with_logits(precise_logits, labels, name: name); + var (cost, unused_backprop) = (entropy[0], entropy[1]); // The output cost shape should be the input minus axis. 
var output_shape = array_ops.slice(input_shape, diff --git a/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs b/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs index b1dbf5864..29dc525df 100644 --- a/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs +++ b/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs @@ -78,7 +78,7 @@ public static RowPartition from_value_rowids(Tensor value_rowids, minlength: nrows_int32, maxlength: nrows_int32, dtype: value_rowids.dtype); - var row_splits = array_ops.concat(new object[] + var row_splits = array_ops.concat(new Tensor[] { ops.convert_to_tensor(new long[] { 0 }), tf.cumsum(row_lengths) diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs index ef71be2c0..c7a631d8b 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs @@ -154,103 +154,103 @@ public partial class Tensor public static Tensor operator >(Tensor lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); public static Tensor operator >(Tensor lhs, NDArray rhs) => gen_math_ops.greater(lhs, rhs); public static Tensor operator >(NDArray lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, sbyte rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(sbyte lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, byte rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(byte lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, short rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(short lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, ushort rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(ushort lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, int rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(int lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, uint rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(uint lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, ulong rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(ulong lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, long rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(long lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, float rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(float lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, double rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(double lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, Complex rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Complex lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); + public static Tensor operator >(Tensor lhs, sbyte rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(sbyte lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, byte rhs) => 
gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(byte lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, short rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(short lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, ushort rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(ushort lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, int rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(int lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, uint rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(uint lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, ulong rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(ulong lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), ops.convert_to_tensor(rhs)); + public static Tensor operator >(Tensor lhs, long rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(long lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, float rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(float lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, double rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(double lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, Complex rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(Complex lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); public static Tensor operator <(Tensor lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); public static Tensor operator <(Tensor lhs, NDArray rhs) => gen_math_ops.less(lhs, rhs); public static Tensor operator <(NDArray lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, sbyte rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(sbyte lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, byte rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(byte lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, short rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(short lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, ushort rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(ushort lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, int rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(int lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, uint rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(uint lhs, Tensor 
rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, ulong rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(ulong lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, long rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(long lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, float rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(float lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, double rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(double lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, Complex rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Complex lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); + public static Tensor operator <(Tensor lhs, sbyte rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(sbyte lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, byte rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(byte lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, short rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(short lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, ushort rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(ushort lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, int rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(int lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, uint rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(uint lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, ulong rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(ulong lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, long rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(long lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, float rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(float lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, double rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(double lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, Complex rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(Complex lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); public static Tensor operator >=(Tensor lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); public static Tensor operator >=(Tensor lhs, NDArray rhs) => 
gen_math_ops.greater_equal(lhs, rhs); public static Tensor operator >=(NDArray lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, sbyte rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(sbyte lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, byte rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(byte lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, short rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(short lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, ushort rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(ushort lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, int rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(int lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, uint rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(uint lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, ulong rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(ulong lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, long rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(long lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, float rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(float lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, double rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(double lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, Complex rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Complex lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); + public static Tensor operator >=(Tensor lhs, sbyte rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(sbyte lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, byte rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(byte lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, short rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(short lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, ushort rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(ushort lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, int rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(int lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor 
operator >=(Tensor lhs, uint rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(uint lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, ulong rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(ulong lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, long rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(long lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, float rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(float lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, double rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(double lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, Complex rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(Complex lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); public static Tensor operator <=(Tensor lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); public static Tensor operator <=(Tensor lhs, NDArray rhs) => gen_math_ops.less_equal(lhs, rhs); public static Tensor operator <=(NDArray lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, sbyte rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(sbyte lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, byte rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(byte lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, short rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(short lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, ushort rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(ushort lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, int rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(int lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, uint rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(uint lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, ulong rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(ulong lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, long rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(long lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, float rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(float lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, double rhs) => gen_math_ops.less_equal(lhs, rhs); - 
public static Tensor operator <=(double lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, Complex rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Complex lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); + public static Tensor operator <=(Tensor lhs, sbyte rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(sbyte lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, byte rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(byte lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, short rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(short lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, ushort rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(ushort lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, int rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(int lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, uint rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(uint lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, ulong rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(ulong lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, long rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(long lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, float rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(float lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, double rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(double lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, Complex rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(Complex lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); public static Tensor operator -(Tensor x) => gen_math_ops.neg(x); diff --git a/src/TensorFlowNET.Core/Tensors/Tensors.cs b/src/TensorFlowNET.Core/Tensors/Tensors.cs index b98495a32..d063ee39f 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensors.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensors.cs @@ -161,6 +161,9 @@ public unsafe static explicit operator string(Tensors tensor) EnsureSingleTensor(tensor, "explicit conversion to string"); return (string)tensor[0]; } + + public static explicit operator object[](Tensors tensors) + => tensors.items.ToArray(); #endregion #region Implicit Conversions diff --git 
a/src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs b/src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs index 10a85d9d9..e16f82c05 100644 --- a/src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs +++ b/src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs @@ -106,7 +106,7 @@ public virtual SaverDef _build_internal(IVariableV1[] names_to_saveables, name = scope; // Add a placeholder string tensor for the filename. - var filename_tensor = array_ops.placeholder_with_default(string.IsNullOrEmpty(filename) ? "model" : filename, shape: new int[0], name: "filename"); + var filename_tensor = array_ops.placeholder_with_default(tf.convert_to_tensor(string.IsNullOrEmpty(filename) ? "model" : filename), shape: new int[0], name: "filename"); // Keep the name "Const" for backwards compatibility. filename_tensor = gen_array_ops.placeholder_with_default(filename_tensor, shape: new int[0], name: "Const"); diff --git a/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs b/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs index a7e1d7e34..b93c6aed7 100644 --- a/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs +++ b/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs @@ -57,7 +57,8 @@ Tensors permutation(Tensors tensor) IDatasetV2 slice_batch_indices(Tensor indices) { var num_in_full_batch = num_full_batches * _batch_size; - var first_k_indices = array_ops.slice(indices, new int[] { 0 }, new int[] { num_in_full_batch }); + var first_k_indices = array_ops.slice(indices, new Tensor[] { ops.convert_to_tensor(0) }, + new Tensor[] { ops.convert_to_tensor(num_in_full_batch) }); first_k_indices = array_ops.reshape(first_k_indices, new int[] { num_full_batches, _batch_size }); var flat_dataset = tf.data.Dataset.from_tensor_slices(first_k_indices); if (_partial_batch_size > 0) @@ -81,7 +82,7 @@ IDatasetV2 slice_inputs(IDatasetV2 indices_dataset, Tensors elements) { var indices = inputs[0]; var results = inputs.Skip(1) - .Select(x => gen_array_ops.gather_v2(x, indices, 0)) + .Select(x => array_ops.gather(x, indices, axis: 0)) .ToArray(); return new Tensors(results); }, -1); diff --git a/src/TensorFlowNET.Keras/Layers/Core/Dense.cs b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs index b1cc2446c..aa6617ddc 100644 --- a/src/TensorFlowNET.Keras/Layers/Core/Dense.cs +++ b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs @@ -79,7 +79,7 @@ protected override Tensors Call(Tensors inputs, Tensor state = null, bool? 
train } else { - outputs = gen_math_ops.mat_mul(inputs, kernel.AsTensor()); + outputs = math_ops.matmul(inputs, kernel.AsTensor()); } if (args.UseBias) diff --git a/src/TensorFlowNET.Keras/Losses/Huber.cs b/src/TensorFlowNET.Keras/Losses/Huber.cs index a256786f1..7169ba461 100644 --- a/src/TensorFlowNET.Keras/Losses/Huber.cs +++ b/src/TensorFlowNET.Keras/Losses/Huber.cs @@ -30,7 +30,7 @@ public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool fro return gen_math_ops.mean(array_ops.where_v2(abs_error <= delta, half * math_ops.pow(error, 2), half * math_ops.pow(delta, 2) + delta * (abs_error - delta)), - axis: -1); + ops.convert_to_tensor(-1)); } } } diff --git a/src/TensorFlowNET.Keras/Losses/LogCosh.cs b/src/TensorFlowNET.Keras/Losses/LogCosh.cs index 8acbbe9d2..7cfd4f67b 100644 --- a/src/TensorFlowNET.Keras/Losses/LogCosh.cs +++ b/src/TensorFlowNET.Keras/Losses/LogCosh.cs @@ -20,7 +20,8 @@ public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool fro Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); Tensor x = y_pred_dispatch - y_true_cast; - return gen_math_ops.mean(x + gen_math_ops.softplus(-2.0 * x) - math_ops.cast(math_ops.log(tf.Variable(2.0)), x.dtype), axis: -1); + return gen_math_ops.mean(x + gen_nn_ops.softplus(-2.0 * x) - math_ops.cast(math_ops.log(tf.Variable(2.0)), x.dtype), + ops.convert_to_tensor(-1)); } } } diff --git a/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs b/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs index 5d0f83d43..c203bc5ad 100644 --- a/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs +++ b/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs @@ -17,7 +17,7 @@ public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool fro { Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); - return gen_math_ops.mean(math_ops.abs(y_pred_dispatch - y_true_cast), axis: -1); + return gen_math_ops.mean(math_ops.abs(y_pred_dispatch - y_true_cast), ops.convert_to_tensor(-1)); } } } diff --git a/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs b/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs index 3295b12b1..8dcaa1bcc 100644 --- a/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs +++ b/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs @@ -18,7 +18,7 @@ public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool fro Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); Tensor diff = math_ops.abs(y_true_cast - y_pred_dispatch) / gen_math_ops.maximum(math_ops.abs(y_true_cast), gen_math_ops.cast(tf.constant(1e-7), y_pred_dispatch.dtype)); - return gen_math_ops.cast(tf.constant(100), y_pred_dispatch.dtype) * gen_math_ops.mean(diff, axis: -1); + return gen_math_ops.cast(tf.constant(100), y_pred_dispatch.dtype) * gen_math_ops.mean(diff, ops.convert_to_tensor(-1)); } } } diff --git a/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs b/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs index 6ae7d86d4..73cddef14 100644 --- a/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs +++ b/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs @@ -17,7 +17,7 @@ public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool fro { Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); - return 
gen_math_ops.mean(gen_math_ops.squared_difference(y_pred_dispatch, y_true_cast), axis: -1); + return gen_math_ops.mean(gen_math_ops.squared_difference(y_pred_dispatch, y_true_cast), ops.convert_to_tensor(-1)); } } } diff --git a/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs b/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs index 22b5a6ff9..e29659218 100644 --- a/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs +++ b/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs @@ -20,14 +20,14 @@ public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool fro Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); Tensor first_log=null, second_log=null; if (y_pred_dispatch.dtype == TF_DataType.TF_DOUBLE) { - first_log = math_ops.log(gen_math_ops.maximum(y_pred_dispatch, 1e-7) + 1.0); - second_log = math_ops.log(gen_math_ops.maximum(y_true_cast, 1e-7) + 1.0); + first_log = math_ops.log(math_ops.maximum(y_pred_dispatch, 1e-7) + 1.0); + second_log = math_ops.log(math_ops.maximum(y_true_cast, 1e-7) + 1.0); } else { - first_log = math_ops.log(gen_math_ops.maximum(y_pred_dispatch, 1e-7f) + 1.0f); - second_log = math_ops.log(gen_math_ops.maximum(y_true_cast, 1e-7f) + 1.0f); + first_log = math_ops.log(math_ops.maximum(y_pred_dispatch, 1e-7f) + 1.0f); + second_log = math_ops.log(math_ops.maximum(y_true_cast, 1e-7f) + 1.0f); } - return gen_math_ops.mean(gen_math_ops.squared_difference(first_log, second_log), axis: -1); + return gen_math_ops.mean(gen_math_ops.squared_difference(first_log, second_log), ops.convert_to_tensor(-1)); } } } diff --git a/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/WhileContextTestCase.cs b/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/WhileContextTestCase.cs index a31dea7d2..c637cf858 100644 --- a/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/WhileContextTestCase.cs +++ b/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/WhileContextTestCase.cs @@ -25,8 +25,8 @@ private void _testWhileContextHelper(int maximum_iterations) // TODO: implement missing code dependencies var sess = this.cached_session(); var i = constant_op.constant(0, name: "i"); - var c = new Func(x => gen_math_ops.less(x, 10, name: "c")); - var b = new Func(x => gen_math_ops.add(x, 1, name: "c")); + var c = new Func(x => gen_math_ops.less(x, ops.convert_to_tensor(10), name: "c")); + var b = new Func(x => math_ops.add(x, 1, name: "c")); //control_flow_ops.while_loop( // c, b, i , maximum_iterations: tf.constant(maximum_iterations)); foreach (Operation op in sess.graph.get_operations()) diff --git a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs index 92afd6a3f..f240817b4 100644 --- a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs @@ -260,7 +260,7 @@ public void testConcatGrad() public void testStopGradientFunction() { var ap = tf.constant(1f); - var b = tf.tanh(ap) + gen_array_ops.stop_gradient(ap); + var b = tf.tanh(ap) + array_ops.stop_gradient(ap); var g = tf.gradients(b, ap); var sess = tf.Session(); var result = sess.run(g); diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs index 6a12ed20b..72f598e46 100644 --- a/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs +++ b/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs @@ -18,7 +18,7 @@ public void Slice() var input_array = 
tf.constant(np.array(new int[] { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }).reshape((3,2,3))); var indices = tf.constant(np.array(new int[] { 0, 2 })); - var r1 = array_ops.slice(input_array, new int[] { 1, 0, 0 }, new int[] { 1, 1, 3 }); + var r1 = array_ops.slice(input_array, ops.convert_n_to_tensor(new object[] { 1, 0, 0 }), ops.convert_n_to_tensor(new object[] { 1, 1, 3 })); Assert.AreEqual(new Shape(1,1,3), r1.shape); var r1np = r1.numpy(); Assert.AreEqual(r1np[0, 0, 0], 3); @@ -26,7 +26,7 @@ public void Slice() Assert.AreEqual(r1np[0, 0, 2], 3); - var r2 = array_ops.slice(input_array, new int[] { 1, 0, 0 }, new int[] { 1, 2, 3 }); + var r2 = array_ops.slice(input_array, ops.convert_n_to_tensor(new object[] { 1, 0, 0 }), ops.convert_n_to_tensor(new object[] { 1, 2, 3 })); Assert.AreEqual(new Shape(1, 2, 3), r2.shape); var r2np = r2.numpy(); Assert.AreEqual(r2np[0, 0, 0], 3); @@ -36,7 +36,7 @@ public void Slice() Assert.AreEqual(r2np[0, 1, 1], 4); Assert.AreEqual(r2np[0, 1, 2], 4); - var r3 = array_ops.slice(input_array, new int[] { 1, 0, 0 }, new int[] { 2, 1, 3 }); + var r3 = array_ops.slice(input_array, ops.convert_n_to_tensor(new object[] { 1, 0, 0 }), ops.convert_n_to_tensor(new object[] { 2, 1, 3 })); Assert.AreEqual(new Shape(2, 1, 3), r3.shape); var r3np = r3.numpy(); Assert.AreEqual(r3np[0, 0, 0], 3); From 854e3d76469124fed6d5ad005179ef0cd8ed3dc4 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Mon, 8 May 2023 02:07:56 +0800 Subject: [PATCH 007/182] build: revise package dependencies. --- TensorFlow.NET.sln | 20 -------------------- Tensorflow.CodeGen/Tensorflow.CodeGen.csproj | 2 +- 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln index 8d5488146..2950c5d23 100644 --- a/TensorFlow.NET.sln +++ b/TensorFlow.NET.sln @@ -37,8 +37,6 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest.RedistH EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.CodeGen", "Tensorflow.CodeGen\Tensorflow.CodeGen.csproj", "{BADBB104-2F03-4824-A249-803A871D8122}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "protobuf.Text", "..\protobuf.Text\src\protobuf.Text\protobuf.Text.csproj", "{151B3A8A-8576-4190-BD58-F42944A49718}" -EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -304,24 +302,6 @@ Global {BADBB104-2F03-4824-A249-803A871D8122}.Release|x64.Build.0 = Release|Any CPU {BADBB104-2F03-4824-A249-803A871D8122}.Release|x86.ActiveCfg = Release|Any CPU {BADBB104-2F03-4824-A249-803A871D8122}.Release|x86.Build.0 = Release|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|Any CPU.Build.0 = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x64.ActiveCfg = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x64.Build.0 = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x86.ActiveCfg = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x86.Build.0 = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|Any CPU.ActiveCfg = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|Any CPU.Build.0 = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x64.ActiveCfg = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x64.Build.0 = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x86.ActiveCfg = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x86.Build.0 = 
Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Release|Any CPU.ActiveCfg = Release|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Release|Any CPU.Build.0 = Release|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x64.ActiveCfg = Release|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x64.Build.0 = Release|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x86.ActiveCfg = Release|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj index 865db126b..5948fb2c3 100644 --- a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj +++ b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj @@ -9,10 +9,10 @@ + - From 87b34520be40f5cf8d8a91a3d7fe73ff0134191a Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Sat, 13 May 2023 21:45:16 +0800 Subject: [PATCH 008/182] fix: error when using graph in multi-threads. --- src/TensorFlowNET.Core/Device/DeviceSpec.cs | 5 ++- .../Basics/ThreadSafeTest.cs | 41 +++++++++++++++++++ 2 files changed, 44 insertions(+), 2 deletions(-) create mode 100644 test/TensorFlowNET.UnitTest/Basics/ThreadSafeTest.cs diff --git a/src/TensorFlowNET.Core/Device/DeviceSpec.cs b/src/TensorFlowNET.Core/Device/DeviceSpec.cs index f4ea8cf05..255191cb5 100644 --- a/src/TensorFlowNET.Core/Device/DeviceSpec.cs +++ b/src/TensorFlowNET.Core/Device/DeviceSpec.cs @@ -1,4 +1,5 @@ using System; +using System.Collections.Concurrent; using System.Collections.Generic; using System.Text; using System.Threading.Tasks; @@ -7,8 +8,8 @@ namespace Tensorflow.Device { public class DeviceSpec { - private static Dictionary _STRING_TO_COMPONENTS_CACHE = new(); - private static Dictionary _COMPONENTS_TO_STRING_CACHE = new(); + private static ConcurrentDictionary _STRING_TO_COMPONENTS_CACHE = new(); + private static ConcurrentDictionary _COMPONENTS_TO_STRING_CACHE = new(); private string _job; private int _replica; private int _task; diff --git a/test/TensorFlowNET.UnitTest/Basics/ThreadSafeTest.cs b/test/TensorFlowNET.UnitTest/Basics/ThreadSafeTest.cs new file mode 100644 index 000000000..6a633448c --- /dev/null +++ b/test/TensorFlowNET.UnitTest/Basics/ThreadSafeTest.cs @@ -0,0 +1,41 @@ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Tensorflow; +using static Tensorflow.Binding; + +namespace TensorFlowNET.UnitTest.Basics +{ + [TestClass] + public class ThreadSafeTest + { + [TestMethod] + public void GraphWithMultiThreads() + { + List threads = new List(); + + const int THREADS_COUNT = 5; + + for (int t = 0; t < THREADS_COUNT; t++) + { + Thread thread = new Thread(() => + { + Graph g = new Graph(); + Session session = new Session(g); + session.as_default(); + var input = tf.placeholder(tf.int32, shape: new Shape(6)); + var op = tf.reshape(input, new int[] { 2, 3 }); + }); + thread.Start(); + threads.Add(thread); + } + + threads.ForEach(t => t.Join()); + } + } +} From 6511737d78966472dea5139af95734e0f93117c5 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Sat, 13 May 2023 22:36:23 +0800 Subject: [PATCH 009/182] docs: add discord info to readme. 
--- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c3ffdbaa5..71d6bdf4c 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ **TensorFlow.NET** (TF.NET) provides a .NET Standard binding for [TensorFlow](https://www.tensorflow.org/). It aims to implement the complete Tensorflow API in C# which allows .NET developers to develop, train and deploy Machine Learning models with the cross-platform .NET Standard framework. TensorFlow.NET has built-in Keras high-level interface and is released as an independent package [TensorFlow.Keras](https://www.nuget.org/packages/TensorFlow.Keras/). +![Discord](https://img.shields.io/discord/1106946823282761851?label=Discord) [![Join the chat at https://gitter.im/publiclab/publiclab](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/sci-sharp/community) [![CI Status](https://github.com/SciSharp/TensorFlow.NET/actions/workflows/build_and_test.yml/badge.svg)](https://github.com/SciSharp/TensorFlow.NET/actions/workflows/build_and_test.yml) [![NuGet Badge](https://buildstats.info/nuget/TensorFlow.NET?includePreReleases=true)](https://www.nuget.org/packages/TensorFlow.NET) @@ -238,9 +239,9 @@ Buy our book to make open source project be sustainable [TensorFlow.NET实战](h ### Contact -Follow us on [Twitter](https://twitter.com/ScisharpStack), [Facebook](https://www.facebook.com/scisharp.stack.9), [Medium](https://medium.com/scisharp), [LinkedIn](https://www.linkedin.com/company/scisharp-stack/). +Join our chat on [Discord](https://discord.gg/quBc2jrz) or [Gitter](https://gitter.im/sci-sharp/community). -Join our chat on [Gitter](https://gitter.im/sci-sharp/community). +Follow us on [Twitter](https://twitter.com/ScisharpStack), [Facebook](https://www.facebook.com/scisharp.stack.9), [Medium](https://medium.com/scisharp), [LinkedIn](https://www.linkedin.com/company/scisharp-stack/). TensorFlow.NET is a part of [SciSharp STACK](https://scisharp.github.io/SciSharp/)
From 8208f762b05c5f5728ac31ed44a2e8e3f0fd31c6 Mon Sep 17 00:00:00 2001 From: Rinne Date: Mon, 15 May 2023 04:14:23 +0800 Subject: [PATCH 010/182] docs: update readme file. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 71d6bdf4c..03f30d2b2 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ **TensorFlow.NET** (TF.NET) provides a .NET Standard binding for [TensorFlow](https://www.tensorflow.org/). It aims to implement the complete Tensorflow API in C# which allows .NET developers to develop, train and deploy Machine Learning models with the cross-platform .NET Standard framework. TensorFlow.NET has built-in Keras high-level interface and is released as an independent package [TensorFlow.Keras](https://www.nuget.org/packages/TensorFlow.Keras/). -![Discord](https://img.shields.io/discord/1106946823282761851?label=Discord) +[![Discord](https://img.shields.io/discord/1106946823282761851?label=Discord)](https://discord.gg/quBc2jrz) [![Join the chat at https://gitter.im/publiclab/publiclab](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/sci-sharp/community) [![CI Status](https://github.com/SciSharp/TensorFlow.NET/actions/workflows/build_and_test.yml/badge.svg)](https://github.com/SciSharp/TensorFlow.NET/actions/workflows/build_and_test.yml) [![NuGet Badge](https://buildstats.info/nuget/TensorFlow.NET?includePreReleases=true)](https://www.nuget.org/packages/TensorFlow.NET) From b26c37ab20925d976dd94604c9a3386e0b5eb288 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Tue, 16 May 2023 02:40:39 +0800 Subject: [PATCH 011/182] build: add native library splitter and adjust directory structure. --- TensorFlow.NET.sln | 21 ++ .../Crash/RepeatDataSetCrash.cs | 0 .../Leak/GpuLeakByCNN.cs | 0 .../Leak/SavedModelCleanup.cs | 0 .../Leak/TestModel/saved_model/saved_model.pb | Bin .../variables/variables.data-00000-of-00001 | Bin .../saved_model/variables/variables.index | Bin .../TensorFlowNET.Benchmarks}/Program.cs | 0 .../TensorFlowNET.Benchmarks}/README.md | 0 .../TensorBenchmark.cs | 0 .../Tensorflow.Benchmark.csproj | 0 .../Unmanaged/StructCastBenchmark.cs | 0 .../TensorFlowNET.Console/Diagnostician.cs | 0 .../TensorFlowNET.Console/Exploring.cs | 0 .../TensorFlowNET.Console/MemoryBasicTest.cs | 0 .../MemoryFuncGraphTest.cs | 0 .../TensorFlowNET.Console/MemoryKerasTest.cs | 0 .../TensorFlowNET.Console/MemoryMonitor.cs | 0 .../TensorFlowNET.Console/Program.cs | 0 .../TensorFlowNET.Console/SimpleRnnTest.cs | 0 .../Tensorflow.Console.csproj | 0 .../DescriptionGenerator.cs | 0 .../Tensorflow.CodeGen}/FunctionGenerator.cs | 0 .../Tensorflow.CodeGen}/GenOpsWriter.cs | 0 .../Tensorflow.CodeGen}/OpClassifier.cs | 0 .../Tensorflow.CodeGen}/Program.cs | 0 .../Tensorflow.CodeGen.csproj | 0 .../Tensorflow.CodeGen}/Utils.cs | 0 .../Program.cs | 212 ++++++++++++++++++ ...orflow.Redist.NativeLibrarySplitter.csproj | 10 + .../EmptyClass.cs | 0 .../Tensorflow.UnitTest.RedistHolder.csproj | 0 .../scripts}/Copy-NativeTensorFlowLibs.ps1 | 0 .../tensorflowlib}/README.md | 0 34 files changed, 243 insertions(+) rename {src/TensorFlowNet.Benchmarks => tools/TensorFlowNET.Benchmarks}/Crash/RepeatDataSetCrash.cs (100%) rename {src/TensorFlowNet.Benchmarks => tools/TensorFlowNET.Benchmarks}/Leak/GpuLeakByCNN.cs (100%) rename {src/TensorFlowNet.Benchmarks => tools/TensorFlowNET.Benchmarks}/Leak/SavedModelCleanup.cs (100%) rename {src/TensorFlowNet.Benchmarks => tools/TensorFlowNET.Benchmarks}/Leak/TestModel/saved_model/saved_model.pb (100%) rename 
{src/TensorFlowNet.Benchmarks => tools/TensorFlowNET.Benchmarks}/Leak/TestModel/saved_model/variables/variables.data-00000-of-00001 (100%) rename {src/TensorFlowNet.Benchmarks => tools/TensorFlowNET.Benchmarks}/Leak/TestModel/saved_model/variables/variables.index (100%) rename {src/TensorFlowNet.Benchmarks => tools/TensorFlowNET.Benchmarks}/Program.cs (100%) rename {src/TensorFlowNet.Benchmarks => tools/TensorFlowNET.Benchmarks}/README.md (100%) rename {src/TensorFlowNet.Benchmarks => tools/TensorFlowNET.Benchmarks}/TensorBenchmark.cs (100%) rename {src/TensorFlowNet.Benchmarks => tools/TensorFlowNET.Benchmarks}/Tensorflow.Benchmark.csproj (100%) rename {src/TensorFlowNet.Benchmarks => tools/TensorFlowNET.Benchmarks}/Unmanaged/StructCastBenchmark.cs (100%) rename {src => tools}/TensorFlowNET.Console/Diagnostician.cs (100%) rename {src => tools}/TensorFlowNET.Console/Exploring.cs (100%) rename {src => tools}/TensorFlowNET.Console/MemoryBasicTest.cs (100%) rename {src => tools}/TensorFlowNET.Console/MemoryFuncGraphTest.cs (100%) rename {src => tools}/TensorFlowNET.Console/MemoryKerasTest.cs (100%) rename {src => tools}/TensorFlowNET.Console/MemoryMonitor.cs (100%) rename {src => tools}/TensorFlowNET.Console/Program.cs (100%) rename {src => tools}/TensorFlowNET.Console/SimpleRnnTest.cs (100%) rename {src => tools}/TensorFlowNET.Console/Tensorflow.Console.csproj (100%) rename {Tensorflow.CodeGen => tools/Tensorflow.CodeGen}/DescriptionGenerator.cs (100%) rename {Tensorflow.CodeGen => tools/Tensorflow.CodeGen}/FunctionGenerator.cs (100%) rename {Tensorflow.CodeGen => tools/Tensorflow.CodeGen}/GenOpsWriter.cs (100%) rename {Tensorflow.CodeGen => tools/Tensorflow.CodeGen}/OpClassifier.cs (100%) rename {Tensorflow.CodeGen => tools/Tensorflow.CodeGen}/Program.cs (100%) rename {Tensorflow.CodeGen => tools/Tensorflow.CodeGen}/Tensorflow.CodeGen.csproj (100%) rename {Tensorflow.CodeGen => tools/Tensorflow.CodeGen}/Utils.cs (100%) create mode 100644 tools/Tensorflow.Redist.NativeLibrarySplitter/Program.cs create mode 100644 tools/Tensorflow.Redist.NativeLibrarySplitter/Tensorflow.Redist.NativeLibrarySplitter.csproj rename {helpers => tools}/Tensorflow.UnitTest.RedistHolder/EmptyClass.cs (100%) rename {helpers => tools}/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj (100%) rename {scripts => tools/scripts}/Copy-NativeTensorFlowLibs.ps1 (100%) rename {tensorflowlib => tools/tensorflowlib}/README.md (100%) diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln index 2950c5d23..ac6e6afae 100644 --- a/TensorFlow.NET.sln +++ b/TensorFlow.NET.sln @@ -37,6 +37,8 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest.RedistH EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.CodeGen", "Tensorflow.CodeGen\Tensorflow.CodeGen.csproj", "{BADBB104-2F03-4824-A249-803A871D8122}" EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Tensorflow.Redist.NativeLibrarySplitter", "NativeLibrarySplitter\Tensorflow.Redist.NativeLibrarySplitter.csproj", "{B85FA7C7-1E8D-4567-B3F4-605955557DAE}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -302,6 +304,24 @@ Global {BADBB104-2F03-4824-A249-803A871D8122}.Release|x64.Build.0 = Release|Any CPU {BADBB104-2F03-4824-A249-803A871D8122}.Release|x86.ActiveCfg = Release|Any CPU {BADBB104-2F03-4824-A249-803A871D8122}.Release|x86.Build.0 = Release|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + 
{B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Debug|Any CPU.Build.0 = Debug|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Debug|x64.ActiveCfg = Debug|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Debug|x64.Build.0 = Debug|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Debug|x86.ActiveCfg = Debug|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Debug|x86.Build.0 = Debug|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.GPU|Any CPU.Build.0 = Debug|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.GPU|x64.ActiveCfg = Debug|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.GPU|x64.Build.0 = Debug|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.GPU|x86.ActiveCfg = Debug|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.GPU|x86.Build.0 = Debug|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Release|Any CPU.ActiveCfg = Release|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Release|Any CPU.Build.0 = Release|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Release|x64.ActiveCfg = Release|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Release|x64.Build.0 = Release|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Release|x86.ActiveCfg = Release|Any CPU + {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -321,6 +341,7 @@ Global {7DEA8760-E401-4872-81F3-405F185A13A0} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD} {62D543A2-8846-45A3-829B-5754B094A8E2} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} {BADBB104-2F03-4824-A249-803A871D8122} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} + {B85FA7C7-1E8D-4567-B3F4-605955557DAE} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A} diff --git a/src/TensorFlowNet.Benchmarks/Crash/RepeatDataSetCrash.cs b/tools/TensorFlowNET.Benchmarks/Crash/RepeatDataSetCrash.cs similarity index 100% rename from src/TensorFlowNet.Benchmarks/Crash/RepeatDataSetCrash.cs rename to tools/TensorFlowNET.Benchmarks/Crash/RepeatDataSetCrash.cs diff --git a/src/TensorFlowNet.Benchmarks/Leak/GpuLeakByCNN.cs b/tools/TensorFlowNET.Benchmarks/Leak/GpuLeakByCNN.cs similarity index 100% rename from src/TensorFlowNet.Benchmarks/Leak/GpuLeakByCNN.cs rename to tools/TensorFlowNET.Benchmarks/Leak/GpuLeakByCNN.cs diff --git a/src/TensorFlowNet.Benchmarks/Leak/SavedModelCleanup.cs b/tools/TensorFlowNET.Benchmarks/Leak/SavedModelCleanup.cs similarity index 100% rename from src/TensorFlowNet.Benchmarks/Leak/SavedModelCleanup.cs rename to tools/TensorFlowNET.Benchmarks/Leak/SavedModelCleanup.cs diff --git a/src/TensorFlowNet.Benchmarks/Leak/TestModel/saved_model/saved_model.pb b/tools/TensorFlowNET.Benchmarks/Leak/TestModel/saved_model/saved_model.pb similarity index 100% rename from src/TensorFlowNet.Benchmarks/Leak/TestModel/saved_model/saved_model.pb rename to tools/TensorFlowNET.Benchmarks/Leak/TestModel/saved_model/saved_model.pb diff --git a/src/TensorFlowNet.Benchmarks/Leak/TestModel/saved_model/variables/variables.data-00000-of-00001 b/tools/TensorFlowNET.Benchmarks/Leak/TestModel/saved_model/variables/variables.data-00000-of-00001 similarity index 100% rename from src/TensorFlowNet.Benchmarks/Leak/TestModel/saved_model/variables/variables.data-00000-of-00001 rename to tools/TensorFlowNET.Benchmarks/Leak/TestModel/saved_model/variables/variables.data-00000-of-00001 diff --git 
a/src/TensorFlowNet.Benchmarks/Leak/TestModel/saved_model/variables/variables.index b/tools/TensorFlowNET.Benchmarks/Leak/TestModel/saved_model/variables/variables.index similarity index 100% rename from src/TensorFlowNet.Benchmarks/Leak/TestModel/saved_model/variables/variables.index rename to tools/TensorFlowNET.Benchmarks/Leak/TestModel/saved_model/variables/variables.index diff --git a/src/TensorFlowNet.Benchmarks/Program.cs b/tools/TensorFlowNET.Benchmarks/Program.cs similarity index 100% rename from src/TensorFlowNet.Benchmarks/Program.cs rename to tools/TensorFlowNET.Benchmarks/Program.cs diff --git a/src/TensorFlowNet.Benchmarks/README.md b/tools/TensorFlowNET.Benchmarks/README.md similarity index 100% rename from src/TensorFlowNet.Benchmarks/README.md rename to tools/TensorFlowNET.Benchmarks/README.md diff --git a/src/TensorFlowNet.Benchmarks/TensorBenchmark.cs b/tools/TensorFlowNET.Benchmarks/TensorBenchmark.cs similarity index 100% rename from src/TensorFlowNet.Benchmarks/TensorBenchmark.cs rename to tools/TensorFlowNET.Benchmarks/TensorBenchmark.cs diff --git a/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj b/tools/TensorFlowNET.Benchmarks/Tensorflow.Benchmark.csproj similarity index 100% rename from src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj rename to tools/TensorFlowNET.Benchmarks/Tensorflow.Benchmark.csproj diff --git a/src/TensorFlowNet.Benchmarks/Unmanaged/StructCastBenchmark.cs b/tools/TensorFlowNET.Benchmarks/Unmanaged/StructCastBenchmark.cs similarity index 100% rename from src/TensorFlowNet.Benchmarks/Unmanaged/StructCastBenchmark.cs rename to tools/TensorFlowNET.Benchmarks/Unmanaged/StructCastBenchmark.cs diff --git a/src/TensorFlowNET.Console/Diagnostician.cs b/tools/TensorFlowNET.Console/Diagnostician.cs similarity index 100% rename from src/TensorFlowNET.Console/Diagnostician.cs rename to tools/TensorFlowNET.Console/Diagnostician.cs diff --git a/src/TensorFlowNET.Console/Exploring.cs b/tools/TensorFlowNET.Console/Exploring.cs similarity index 100% rename from src/TensorFlowNET.Console/Exploring.cs rename to tools/TensorFlowNET.Console/Exploring.cs diff --git a/src/TensorFlowNET.Console/MemoryBasicTest.cs b/tools/TensorFlowNET.Console/MemoryBasicTest.cs similarity index 100% rename from src/TensorFlowNET.Console/MemoryBasicTest.cs rename to tools/TensorFlowNET.Console/MemoryBasicTest.cs diff --git a/src/TensorFlowNET.Console/MemoryFuncGraphTest.cs b/tools/TensorFlowNET.Console/MemoryFuncGraphTest.cs similarity index 100% rename from src/TensorFlowNET.Console/MemoryFuncGraphTest.cs rename to tools/TensorFlowNET.Console/MemoryFuncGraphTest.cs diff --git a/src/TensorFlowNET.Console/MemoryKerasTest.cs b/tools/TensorFlowNET.Console/MemoryKerasTest.cs similarity index 100% rename from src/TensorFlowNET.Console/MemoryKerasTest.cs rename to tools/TensorFlowNET.Console/MemoryKerasTest.cs diff --git a/src/TensorFlowNET.Console/MemoryMonitor.cs b/tools/TensorFlowNET.Console/MemoryMonitor.cs similarity index 100% rename from src/TensorFlowNET.Console/MemoryMonitor.cs rename to tools/TensorFlowNET.Console/MemoryMonitor.cs diff --git a/src/TensorFlowNET.Console/Program.cs b/tools/TensorFlowNET.Console/Program.cs similarity index 100% rename from src/TensorFlowNET.Console/Program.cs rename to tools/TensorFlowNET.Console/Program.cs diff --git a/src/TensorFlowNET.Console/SimpleRnnTest.cs b/tools/TensorFlowNET.Console/SimpleRnnTest.cs similarity index 100% rename from src/TensorFlowNET.Console/SimpleRnnTest.cs rename to 
tools/TensorFlowNET.Console/SimpleRnnTest.cs diff --git a/src/TensorFlowNET.Console/Tensorflow.Console.csproj b/tools/TensorFlowNET.Console/Tensorflow.Console.csproj similarity index 100% rename from src/TensorFlowNET.Console/Tensorflow.Console.csproj rename to tools/TensorFlowNET.Console/Tensorflow.Console.csproj diff --git a/Tensorflow.CodeGen/DescriptionGenerator.cs b/tools/Tensorflow.CodeGen/DescriptionGenerator.cs similarity index 100% rename from Tensorflow.CodeGen/DescriptionGenerator.cs rename to tools/Tensorflow.CodeGen/DescriptionGenerator.cs diff --git a/Tensorflow.CodeGen/FunctionGenerator.cs b/tools/Tensorflow.CodeGen/FunctionGenerator.cs similarity index 100% rename from Tensorflow.CodeGen/FunctionGenerator.cs rename to tools/Tensorflow.CodeGen/FunctionGenerator.cs diff --git a/Tensorflow.CodeGen/GenOpsWriter.cs b/tools/Tensorflow.CodeGen/GenOpsWriter.cs similarity index 100% rename from Tensorflow.CodeGen/GenOpsWriter.cs rename to tools/Tensorflow.CodeGen/GenOpsWriter.cs diff --git a/Tensorflow.CodeGen/OpClassifier.cs b/tools/Tensorflow.CodeGen/OpClassifier.cs similarity index 100% rename from Tensorflow.CodeGen/OpClassifier.cs rename to tools/Tensorflow.CodeGen/OpClassifier.cs diff --git a/Tensorflow.CodeGen/Program.cs b/tools/Tensorflow.CodeGen/Program.cs similarity index 100% rename from Tensorflow.CodeGen/Program.cs rename to tools/Tensorflow.CodeGen/Program.cs diff --git a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj similarity index 100% rename from Tensorflow.CodeGen/Tensorflow.CodeGen.csproj rename to tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj diff --git a/Tensorflow.CodeGen/Utils.cs b/tools/Tensorflow.CodeGen/Utils.cs similarity index 100% rename from Tensorflow.CodeGen/Utils.cs rename to tools/Tensorflow.CodeGen/Utils.cs diff --git a/tools/Tensorflow.Redist.NativeLibrarySplitter/Program.cs b/tools/Tensorflow.Redist.NativeLibrarySplitter/Program.cs new file mode 100644 index 000000000..cdc011ea9 --- /dev/null +++ b/tools/Tensorflow.Redist.NativeLibrarySplitter/Program.cs @@ -0,0 +1,212 @@ + +// =================================================================== // +// This is a tool to split the native .so file of linux gpu library // +// =================================================================== // + +using System.Security.Cryptography; + +string filename = "libtensorflow.so"; +int count = 5; +SplitFile(filename, count); + +static void SplitFile(string filename, int count) +{ + // 打开读取二进制文件的文件流 + using (FileStream input = new FileStream(filename, FileMode.Open, FileAccess.Read)) + { + long filesize = new FileInfo(filename).Length; // 获取文件大小 + long fragmentSize = (long)(filesize / count + 1); // 计算每个分片的大小 + + byte[] buffer = new byte[fragmentSize]; // 设置缓冲区大小 + int bytesRead; // 存储读取长度 + int fragmentIndex = 1; // 分片计数器 + + // 使用循环遍历分片并写入相应的文件 + while ((bytesRead = input.Read(buffer, 0, buffer.Length)) > 0) + { + string outputFileName = $"{filename}.fragment{fragmentIndex++}"; + using (FileStream output = new FileStream(outputFileName, FileMode.Create, FileAccess.Write)) + { + output.Write(buffer, 0, bytesRead); + } + } + + // 计算整个文件的 SHA-256 哈希值并写入 .sha 文件 + using (SHA256 sha256Hash = SHA256.Create()) + { + input.Seek(0, SeekOrigin.Begin); + byte[] hashValue = sha256Hash.ComputeHash(input); + + string shaFileName = $"{filename}.sha"; + using (StreamWriter writer = new StreamWriter(shaFileName, false)) + { + writer.Write(BitConverter.ToString(hashValue).Replace("-", "")); + } + } + } +} + +// 
Resume the file from fregments. Thanks for the code in TorchSharp! +static void Restitch(string RestitcherPackage) +{ + // !!!!!!!------------------------------NOTE------------------------------------!!!!!! + // !!!!!!! This code is manually copied into pkg\common\RestitchPackage.targets !!!!!! + // !!!!!!!------------------------------NOTE------------------------------------!!!!!! + // + // vvvvvvvvvvvvvvvvvvvvvvvvvvvvv START HERE vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv + try + { + if (Directory.Exists(RestitcherPackage)) + { + using (var writer = File.CreateText("obj/tensorflow_redist_build_log.txt")) + { + foreach (var p in Directory.EnumerateFiles(RestitcherPackage, "*", SearchOption.AllDirectories)) + { + + var primaryFile = Path.GetFullPath(p); + writer.WriteLine("Found primary file at {0}", primaryFile); + + // See if there are fragments in the parallel nuget packages. If the primary is + // some-package-primary\runtimes\....\a.so + // some-package-primary\runtimes\....\a.so.sha + // then the expected fragments are + // some-package-fragment1\fragments\....\a.so + // some-package-fragment2\fragments\....\a.so + // some-package-fragment3\fragments\....\a.so + // some-package-fragment4\fragments\....\a.so + // some-package-fragment5\fragments\....\a.so + // some-package-fragment6\fragments\....\a.so + // some-package-fragment7\fragments\....\a.so + // some-package-fragment8\fragments\....\a.so + // some-package-fragment9\fragments\....\a.so + // some-package-fragment10\fragments\....\a.so + var shaFile = primaryFile + ".sha"; + var fragmentFile1 = primaryFile.Replace("-primary", "-fragment1").Replace("runtimes", "fragments") + ".fragment1"; + var fragmentFile2 = primaryFile.Replace("-primary", "-fragment2").Replace("runtimes", "fragments") + ".fragment2"; + var fragmentFile3 = primaryFile.Replace("-primary", "-fragment3").Replace("runtimes", "fragments") + ".fragment3"; + var fragmentFile4 = primaryFile.Replace("-primary", "-fragment4").Replace("runtimes", "fragments") + ".fragment4"; + var fragmentFile5 = primaryFile.Replace("-primary", "-fragment5").Replace("runtimes", "fragments") + ".fragment5"; + + + if (File.Exists(fragmentFile1)) writer.WriteLine("Found fragment file at {0}", fragmentFile1); + if (File.Exists(fragmentFile2)) writer.WriteLine("Found fragment file at {0}", fragmentFile2); + if (File.Exists(fragmentFile3)) writer.WriteLine("Found fragment file at {0}", fragmentFile3); + if (File.Exists(fragmentFile4)) writer.WriteLine("Found fragment file at {0}", fragmentFile4); + if (File.Exists(fragmentFile5)) writer.WriteLine("Found fragment file at {0}", fragmentFile5); + + if (File.Exists(fragmentFile1)) + { + var tmpFile = Path.GetTempFileName(); + + { + writer.WriteLine("Writing restored primary file at {0}", tmpFile); + using (var os = File.OpenWrite(tmpFile)) + { + + //writer.WriteLine("Writing bytes from {0} to {1}", primaryFile, tmpFile); + //var primaryBytes = File.ReadAllBytes(primaryFile); + + //os.Write(primaryBytes, 0, primaryBytes.Length); + if (File.Exists(fragmentFile1)) + { + writer.WriteLine("Writing fragment bytes from {0} to {1}", fragmentFile1, tmpFile); + var fragmentBytes1 = File.ReadAllBytes(fragmentFile1); + os.Write(fragmentBytes1, 0, fragmentBytes1.Length); + } + if (File.Exists(fragmentFile2)) + { + writer.WriteLine("Writing fragment bytes from {0} to {1}", fragmentFile2, tmpFile); + var fragmentBytes2 = File.ReadAllBytes(fragmentFile2); + os.Write(fragmentBytes2, 0, fragmentBytes2.Length); + } + if (File.Exists(fragmentFile3)) + { + 
writer.WriteLine("Writing fragment bytes from {0} to {1}", fragmentFile3, tmpFile); + var fragmentBytes3 = File.ReadAllBytes(fragmentFile3); + os.Write(fragmentBytes3, 0, fragmentBytes3.Length); + } + if (File.Exists(fragmentFile4)) + { + writer.WriteLine("Writing fragment bytes from {0} to {1}", fragmentFile4, tmpFile); + var fragmentBytes4 = File.ReadAllBytes(fragmentFile4); + os.Write(fragmentBytes4, 0, fragmentBytes4.Length); + } + if (File.Exists(fragmentFile5)) + { + writer.WriteLine("Writing fragment bytes from {0} to {1}", fragmentFile5, tmpFile); + var fragmentBytes5 = File.ReadAllBytes(fragmentFile5); + os.Write(fragmentBytes5, 0, fragmentBytes5.Length); + } + } + } + + var shaExpected = File.Exists(shaFile) ? File.ReadAllText(shaFile).ToUpper() : ""; + writer.WriteLine($"real sha: {shaExpected}"); + + using (var sha256Hash = System.Security.Cryptography.SHA256.Create()) + { + using (var os2 = File.OpenRead(tmpFile)) + { + + byte[] bytes = sha256Hash.ComputeHash(os2); + var builder = new System.Text.StringBuilder(); + for (int i = 0; i < bytes.Length; i++) + { + builder.Append(bytes[i].ToString("x2")); + } + var shaReconstituted = builder.ToString().ToUpper(); + if (shaExpected != shaReconstituted) + { + string msg = + $"Error downloading and reviving packages. Reconsituted file contents have incorrect SHA\n\tExpected SHA: ${shaExpected}\n\tActual SHA: ${shaReconstituted}\n\tFile was reconstituted from:" + + $"\n\t{primaryFile} (length ${new FileInfo(primaryFile).Length})" + + (File.Exists(fragmentFile1) ? $"\n\t{fragmentFile1} (length ${new FileInfo(fragmentFile1).Length})" : "") + + (File.Exists(fragmentFile2) ? $"\n\t{fragmentFile2} (length ${new FileInfo(fragmentFile2).Length})" : "") + + (File.Exists(fragmentFile3) ? $"\n\t{fragmentFile3} (length ${new FileInfo(fragmentFile3).Length})" : "") + + (File.Exists(fragmentFile4) ? $"\n\t{fragmentFile4} (length ${new FileInfo(fragmentFile4).Length})" : "") + + (File.Exists(fragmentFile5) ? 
$"\n\t{fragmentFile5} (length ${new FileInfo(fragmentFile5).Length})" : ""); + writer.WriteLine(msg); + throw new Exception(msg); + } + } + + } + + writer.WriteLine("Deleting {0}", primaryFile); + File.Delete(primaryFile); + if (File.Exists(primaryFile)) + throw new Exception("wtf?"); + + writer.WriteLine("Moving {0} --> {1}", tmpFile, primaryFile); + File.Move(tmpFile, primaryFile); + + writer.WriteLine("Deleting {0}", fragmentFile1); + File.Delete(fragmentFile1); // free up space and prevent us doing this again + + writer.WriteLine("Deleting {0}", fragmentFile2); + if (File.Exists(fragmentFile2)) + File.Delete(fragmentFile2); // free up space and prevent us doing this again + + writer.WriteLine("Deleting {0}", fragmentFile3); + if (File.Exists(fragmentFile3)) + File.Delete(fragmentFile3); // free up space and prevent us doing this again + + writer.WriteLine("Deleting {0}", fragmentFile4); + if (File.Exists(fragmentFile4)) + File.Delete(fragmentFile4); // free up space and prevent us doing this again + + writer.WriteLine("Deleting {0}", fragmentFile5); + if (File.Exists(fragmentFile5)) + File.Delete(fragmentFile5); // free up space and prevent us doing this again + } + } + } + } + } + catch (Exception ex) + { + Console.Error.WriteLine(ex.ToString()); + Console.Error.WriteLine(ex.StackTrace); + } + // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ END HERE^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +} \ No newline at end of file diff --git a/tools/Tensorflow.Redist.NativeLibrarySplitter/Tensorflow.Redist.NativeLibrarySplitter.csproj b/tools/Tensorflow.Redist.NativeLibrarySplitter/Tensorflow.Redist.NativeLibrarySplitter.csproj new file mode 100644 index 000000000..74abf5c97 --- /dev/null +++ b/tools/Tensorflow.Redist.NativeLibrarySplitter/Tensorflow.Redist.NativeLibrarySplitter.csproj @@ -0,0 +1,10 @@ + + + + Exe + net6.0 + enable + enable + + + diff --git a/helpers/Tensorflow.UnitTest.RedistHolder/EmptyClass.cs b/tools/Tensorflow.UnitTest.RedistHolder/EmptyClass.cs similarity index 100% rename from helpers/Tensorflow.UnitTest.RedistHolder/EmptyClass.cs rename to tools/Tensorflow.UnitTest.RedistHolder/EmptyClass.cs diff --git a/helpers/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj b/tools/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj similarity index 100% rename from helpers/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj rename to tools/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj diff --git a/scripts/Copy-NativeTensorFlowLibs.ps1 b/tools/scripts/Copy-NativeTensorFlowLibs.ps1 similarity index 100% rename from scripts/Copy-NativeTensorFlowLibs.ps1 rename to tools/scripts/Copy-NativeTensorFlowLibs.ps1 diff --git a/tensorflowlib/README.md b/tools/tensorflowlib/README.md similarity index 100% rename from tensorflowlib/README.md rename to tools/tensorflowlib/README.md From 9ce5b29bff2bb3ad6c3605a053a99d1d7648a61a Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Tue, 16 May 2023 02:41:19 +0800 Subject: [PATCH 012/182] feat: add check for redist backend. 
--- src/TensorFlowNET.Core/APIs/c_api.cs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/TensorFlowNET.Core/APIs/c_api.cs b/src/TensorFlowNET.Core/APIs/c_api.cs index 10f678e0a..587470e3f 100644 --- a/src/TensorFlowNET.Core/APIs/c_api.cs +++ b/src/TensorFlowNET.Core/APIs/c_api.cs @@ -45,6 +45,21 @@ public partial class c_api { public const string TensorFlowLibName = "tensorflow"; + static c_api() + { + try + { + var handle = TF_Version(); + } + catch (DllNotFoundException) + { + throw new RuntimeError("Tensorflow.NET cannot find a backend. Please install one of the following packages for your program: " + + "SciSharp.TensorFlow.Redist, SciSharp.TensorFlow.Redist-Linux-GPU, SciSharp.TensorFlow.Redist-Windows-GPU. For more details, " + + "please visit https://github.com/SciSharp/TensorFlow.NET. If it still not work after installing the backend, please submit an " + + "issue to https://github.com/SciSharp/TensorFlow.NET/issues"); + } + } + public static string StringPiece(IntPtr handle) { return handle == IntPtr.Zero ? String.Empty : Marshal.PtrToStringAnsi(handle); From 634860d7555e5a722639246581d8d18628936c14 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Tue, 16 May 2023 02:44:43 +0800 Subject: [PATCH 013/182] fix: unittest project reference. --- TensorFlow.NET.sln | 206 +++++++++--------- .../TensorFlowNET.Graph.UnitTest.csproj | 1 - .../Tensorflow.Keras.UnitTest.csproj | 1 - .../Tensorflow.Native.UnitTest.csproj | 1 - .../Tensorflow.Binding.UnitTest.csproj | 2 +- .../Tensorflow.Hub.Unittest.csproj | 1 - .../Tensorflow.Benchmark.csproj | 2 +- .../Tensorflow.Console.csproj | 4 +- .../Tensorflow.CodeGen.csproj | 2 +- 9 files changed, 108 insertions(+), 112 deletions(-) diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln index ac6e6afae..87729e27d 100644 --- a/TensorFlow.NET.sln +++ b/TensorFlow.NET.sln @@ -5,12 +5,8 @@ VisualStudioVersion = 17.4.33213.308 MinimumVisualStudioVersion = 10.0.40219.1 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding", "src\TensorFlowNET.Core\Tensorflow.Binding.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "src\TensorFlowNet.Benchmarks\Tensorflow.Benchmark.csproj", "{3A6EB896-604F-4E25-B677-B8103BCF3D2E}" -EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding.UnitTest", "test\TensorFlowNET.UnitTest\Tensorflow.Binding.UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Console", "src\TensorFlowNET.Console\Tensorflow.Console.csproj", "{03F06299-3F4B-4449-A709-3A647657BC0C}" -EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\TensorFlowNET.Keras\Tensorflow.Keras.csproj", "{49D71826-C03D-4FA7-9BAC-22C1327E65CF}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Text", "src\TensorFlowNET.Text\Tensorflow.Text.csproj", "{1AB8108D-4FFE-4A16-88E7-328EAF686370}" @@ -31,13 +27,17 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{01A1787F-A9B EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{1B0918B9-65AD-4F34-A287-AF4597B27DBD}" EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "helpers", "helpers", "{E1A5D2B7-10AF-4876-85C0-7714EF274214}" +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tools", "tools", "{E1A5D2B7-10AF-4876-85C0-7714EF274214}" +EndProject 
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.CodeGen", "tools\Tensorflow.CodeGen\Tensorflow.CodeGen.csproj", "{3D92142F-EEDB-469B-B03C-4E38728BFE4C}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Redist.NativeLibrarySplitter", "tools\Tensorflow.Redist.NativeLibrarySplitter\Tensorflow.Redist.NativeLibrarySplitter.csproj", "{AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest.RedistHolder", "helpers\Tensorflow.UnitTest.RedistHolder\Tensorflow.UnitTest.RedistHolder.csproj", "{62D543A2-8846-45A3-829B-5754B094A8E2}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest.RedistHolder", "tools\Tensorflow.UnitTest.RedistHolder\Tensorflow.UnitTest.RedistHolder.csproj", "{D24FCAA5-548C-4251-B226-A1B6535D0845}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.CodeGen", "Tensorflow.CodeGen\Tensorflow.CodeGen.csproj", "{BADBB104-2F03-4824-A249-803A871D8122}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "tools\TensorFlowNET.Benchmarks\Tensorflow.Benchmark.csproj", "{C23563DB-FE21-48E7-A411-87A109E4A899}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Tensorflow.Redist.NativeLibrarySplitter", "NativeLibrarySplitter\Tensorflow.Redist.NativeLibrarySplitter.csproj", "{B85FA7C7-1E8D-4567-B3F4-605955557DAE}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Console", "tools\TensorFlowNET.Console\Tensorflow.Console.csproj", "{1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -70,24 +70,6 @@ Global {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.Build.0 = Release|x64 {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x86.ActiveCfg = Release|Any CPU {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x86.Build.0 = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|x64 - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|x64 - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x86.ActiveCfg = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x86.Build.0 = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.GPU|Any CPU.ActiveCfg = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.GPU|Any CPU.Build.0 = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.GPU|x64.ActiveCfg = Release|x64 - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.GPU|x64.Build.0 = Release|x64 - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.GPU|x86.ActiveCfg = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.GPU|x86.Build.0 = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.Build.0 = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|x64 - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.Build.0 = Release|x64 - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x86.ActiveCfg = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x86.Build.0 = Release|Any CPU {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.Build.0 = Debug|Any CPU {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|x64 @@ 
-106,24 +88,6 @@ Global {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|x64 {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x86.ActiveCfg = Release|Any CPU {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x86.Build.0 = Release|Any CPU - {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|Any CPU.Build.0 = Debug|Any CPU - {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x64.ActiveCfg = Debug|x64 - {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x64.Build.0 = Debug|x64 - {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x86.ActiveCfg = Debug|Any CPU - {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x86.Build.0 = Debug|Any CPU - {03F06299-3F4B-4449-A709-3A647657BC0C}.GPU|Any CPU.ActiveCfg = Release|Any CPU - {03F06299-3F4B-4449-A709-3A647657BC0C}.GPU|Any CPU.Build.0 = Release|Any CPU - {03F06299-3F4B-4449-A709-3A647657BC0C}.GPU|x64.ActiveCfg = Release|x64 - {03F06299-3F4B-4449-A709-3A647657BC0C}.GPU|x64.Build.0 = Release|x64 - {03F06299-3F4B-4449-A709-3A647657BC0C}.GPU|x86.ActiveCfg = Release|Any CPU - {03F06299-3F4B-4449-A709-3A647657BC0C}.GPU|x86.Build.0 = Release|Any CPU - {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|Any CPU.ActiveCfg = Release|Any CPU - {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|Any CPU.Build.0 = Release|Any CPU - {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x64.ActiveCfg = Release|x64 - {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x64.Build.0 = Release|x64 - {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x86.ActiveCfg = Release|Any CPU - {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x86.Build.0 = Release|Any CPU {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|Any CPU.Build.0 = Debug|Any CPU {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|x64.ActiveCfg = Debug|x64 @@ -268,69 +232,103 @@ Global {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|x64.Build.0 = Release|Any CPU {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|x86.ActiveCfg = Release|Any CPU {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|x86.Build.0 = Release|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.Debug|Any CPU.Build.0 = Debug|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.Debug|x64.ActiveCfg = Debug|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.Debug|x64.Build.0 = Debug|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.Debug|x86.ActiveCfg = Debug|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.Debug|x86.Build.0 = Debug|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.GPU|Any CPU.ActiveCfg = Debug|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.GPU|Any CPU.Build.0 = Debug|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.GPU|x64.ActiveCfg = Debug|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.GPU|x64.Build.0 = Debug|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.GPU|x86.ActiveCfg = Debug|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.GPU|x86.Build.0 = Debug|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.Release|Any CPU.ActiveCfg = Release|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.Release|Any CPU.Build.0 = Release|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.Release|x64.ActiveCfg = Release|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.Release|x64.Build.0 = Release|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.Release|x86.ActiveCfg = Release|Any CPU - {62D543A2-8846-45A3-829B-5754B094A8E2}.Release|x86.Build.0 = Release|Any CPU - 
{BADBB104-2F03-4824-A249-803A871D8122}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.Debug|Any CPU.Build.0 = Debug|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.Debug|x64.ActiveCfg = Debug|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.Debug|x64.Build.0 = Debug|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.Debug|x86.ActiveCfg = Debug|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.Debug|x86.Build.0 = Debug|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.GPU|Any CPU.ActiveCfg = Debug|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.GPU|Any CPU.Build.0 = Debug|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.GPU|x64.ActiveCfg = Debug|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.GPU|x64.Build.0 = Debug|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.GPU|x86.ActiveCfg = Debug|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.GPU|x86.Build.0 = Debug|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.Release|Any CPU.ActiveCfg = Release|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.Release|Any CPU.Build.0 = Release|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.Release|x64.ActiveCfg = Release|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.Release|x64.Build.0 = Release|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.Release|x86.ActiveCfg = Release|Any CPU - {BADBB104-2F03-4824-A249-803A871D8122}.Release|x86.Build.0 = Release|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Debug|Any CPU.Build.0 = Debug|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Debug|x64.ActiveCfg = Debug|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Debug|x64.Build.0 = Debug|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Debug|x86.ActiveCfg = Debug|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Debug|x86.Build.0 = Debug|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.GPU|Any CPU.ActiveCfg = Debug|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.GPU|Any CPU.Build.0 = Debug|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.GPU|x64.ActiveCfg = Debug|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.GPU|x64.Build.0 = Debug|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.GPU|x86.ActiveCfg = Debug|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.GPU|x86.Build.0 = Debug|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Release|Any CPU.ActiveCfg = Release|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Release|Any CPU.Build.0 = Release|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Release|x64.ActiveCfg = Release|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Release|x64.Build.0 = Release|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Release|x86.ActiveCfg = Release|Any CPU - {B85FA7C7-1E8D-4567-B3F4-605955557DAE}.Release|x86.Build.0 = Release|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|Any CPU.Build.0 = Debug|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|x64.ActiveCfg = Debug|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|x64.Build.0 = Debug|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|x86.ActiveCfg = Debug|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|x86.Build.0 = Debug|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|Any CPU.Build.0 = Debug|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|x64.ActiveCfg = Debug|Any CPU + 
{3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|x64.Build.0 = Debug|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|x86.ActiveCfg = Debug|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|x86.Build.0 = Debug|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|Any CPU.ActiveCfg = Release|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|Any CPU.Build.0 = Release|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|x64.ActiveCfg = Release|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|x64.Build.0 = Release|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|x86.ActiveCfg = Release|Any CPU + {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|x86.Build.0 = Release|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|Any CPU.Build.0 = Debug|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|x64.ActiveCfg = Debug|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|x64.Build.0 = Debug|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|x86.ActiveCfg = Debug|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|x86.Build.0 = Debug|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|Any CPU.Build.0 = Debug|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|x64.ActiveCfg = Debug|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|x64.Build.0 = Debug|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|x86.ActiveCfg = Debug|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|x86.Build.0 = Debug|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|Any CPU.ActiveCfg = Release|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|Any CPU.Build.0 = Release|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|x64.ActiveCfg = Release|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|x64.Build.0 = Release|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|x86.ActiveCfg = Release|Any CPU + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|x86.Build.0 = Release|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|Any CPU.Build.0 = Debug|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|x64.ActiveCfg = Debug|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|x64.Build.0 = Debug|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|x86.ActiveCfg = Debug|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|x86.Build.0 = Debug|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|Any CPU.Build.0 = Debug|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|x64.ActiveCfg = Debug|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|x64.Build.0 = Debug|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|x86.ActiveCfg = Debug|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|x86.Build.0 = Debug|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|Any CPU.ActiveCfg = Release|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|Any CPU.Build.0 = Release|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|x64.ActiveCfg = Release|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|x64.Build.0 = Release|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|x86.ActiveCfg = Release|Any CPU + {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|x86.Build.0 = Release|Any CPU + 
{C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|x64.ActiveCfg = Debug|x64 + {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|x64.Build.0 = Debug|x64 + {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|x86.ActiveCfg = Debug|Any CPU + {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|x86.Build.0 = Debug|Any CPU + {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|Any CPU.Build.0 = Debug|Any CPU + {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|x64.ActiveCfg = Debug|x64 + {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|x64.Build.0 = Debug|x64 + {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|x86.ActiveCfg = Debug|Any CPU + {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|x86.Build.0 = Debug|Any CPU + {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|Any CPU.Build.0 = Release|Any CPU + {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|x64.ActiveCfg = Release|x64 + {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|x64.Build.0 = Release|x64 + {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|x86.ActiveCfg = Release|Any CPU + {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|x86.Build.0 = Release|Any CPU + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|Any CPU.Build.0 = Debug|Any CPU + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|x64.ActiveCfg = Debug|x64 + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|x64.Build.0 = Debug|x64 + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|x86.ActiveCfg = Debug|Any CPU + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|x86.Build.0 = Debug|Any CPU + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|Any CPU.Build.0 = Debug|Any CPU + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|x64.ActiveCfg = Debug|x64 + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|x64.Build.0 = Debug|x64 + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|x86.ActiveCfg = Debug|Any CPU + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|x86.Build.0 = Debug|Any CPU + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|Any CPU.ActiveCfg = Release|Any CPU + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|Any CPU.Build.0 = Release|Any CPU + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x64.ActiveCfg = Release|x64 + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x64.Build.0 = Release|x64 + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x86.ActiveCfg = Release|Any CPU + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(NestedProjects) = preSolution {FD682AC0-7B2D-45D3-8B0D-C6D678B04144} = {01A1787F-A9BE-4221-84E8-6360DD010AB6} - {3A6EB896-604F-4E25-B677-B8103BCF3D2E} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} {23C28035-2FCE-41F3-9A12-E73CE8A5AE32} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD} - {03F06299-3F4B-4449-A709-3A647657BC0C} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} {49D71826-C03D-4FA7-9BAC-22C1327E65CF} = {01A1787F-A9BE-4221-84E8-6360DD010AB6} {1AB8108D-4FFE-4A16-88E7-328EAF686370} = {01A1787F-A9BE-4221-84E8-6360DD010AB6} {F17AAECB-960A-4E18-A270-BAD776F0E55B} = {01A1787F-A9BE-4221-84E8-6360DD010AB6} @@ -339,9 +337,11 @@ Global {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3} = 
{1B0918B9-65AD-4F34-A287-AF4597B27DBD} {9738D16A-CFA0-405C-A7DF-D3D203B0CB18} = {01A1787F-A9BE-4221-84E8-6360DD010AB6} {7DEA8760-E401-4872-81F3-405F185A13A0} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD} - {62D543A2-8846-45A3-829B-5754B094A8E2} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} - {BADBB104-2F03-4824-A249-803A871D8122} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} - {B85FA7C7-1E8D-4567-B3F4-605955557DAE} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} + {3D92142F-EEDB-469B-B03C-4E38728BFE4C} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} + {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} + {D24FCAA5-548C-4251-B226-A1B6535D0845} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} + {C23563DB-FE21-48E7-A411-87A109E4A899} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} + {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A} diff --git a/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj b/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj index 1385f8611..52adf24c8 100644 --- a/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj +++ b/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj @@ -34,7 +34,6 @@ - diff --git a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj index b964d1178..716849181 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj +++ b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj @@ -23,7 +23,6 @@ - diff --git a/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj b/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj index 61373d2dc..05d1e56f3 100644 --- a/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj +++ b/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj @@ -54,7 +54,6 @@ - diff --git a/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj b/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj index 3a5562e2c..98dadf012 100644 --- a/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj +++ b/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj @@ -48,9 +48,9 @@ - + diff --git a/test/TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj b/test/TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj index 35cb9f16d..f52ed1e17 100644 --- a/test/TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj +++ b/test/TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj @@ -16,7 +16,6 @@ - diff --git a/tools/TensorFlowNET.Benchmarks/Tensorflow.Benchmark.csproj b/tools/TensorFlowNET.Benchmarks/Tensorflow.Benchmark.csproj index 53261f805..f2495d224 100644 --- a/tools/TensorFlowNET.Benchmarks/Tensorflow.Benchmark.csproj +++ b/tools/TensorFlowNET.Benchmarks/Tensorflow.Benchmark.csproj @@ -41,7 +41,7 @@ - + diff --git a/tools/TensorFlowNET.Console/Tensorflow.Console.csproj b/tools/TensorFlowNET.Console/Tensorflow.Console.csproj index 1b84bb145..c79d4845c 100644 --- a/tools/TensorFlowNET.Console/Tensorflow.Console.csproj +++ b/tools/TensorFlowNET.Console/Tensorflow.Console.csproj @@ -24,8 +24,8 @@ - - + + diff --git a/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj index 5948fb2c3..4cb3368d0 100644 --- a/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj +++ 
b/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj @@ -13,7 +13,7 @@ - + From 9f8f3d87d005963bc057fec16b5f02955c492dfe Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Tue, 16 May 2023 03:22:16 +0800 Subject: [PATCH 014/182] fix: error caused by dll check in c_api. --- src/TensorFlowNET.Core/APIs/c_api.cs | 15 --------------- src/TensorFlowNET.Core/tensorflow.cs | 12 ++++++++++++ .../TensorFlowNET.Graph.UnitTest.csproj | 1 + .../Tensorflow.Keras.UnitTest.csproj | 1 + .../Tensorflow.Native.UnitTest.csproj | 1 + .../Tensorflow.Hub.Unittest.csproj | 1 + 6 files changed, 16 insertions(+), 15 deletions(-) diff --git a/src/TensorFlowNET.Core/APIs/c_api.cs b/src/TensorFlowNET.Core/APIs/c_api.cs index 587470e3f..10f678e0a 100644 --- a/src/TensorFlowNET.Core/APIs/c_api.cs +++ b/src/TensorFlowNET.Core/APIs/c_api.cs @@ -45,21 +45,6 @@ public partial class c_api { public const string TensorFlowLibName = "tensorflow"; - static c_api() - { - try - { - var handle = TF_Version(); - } - catch (DllNotFoundException) - { - throw new RuntimeError("Tensorflow.NET cannot find a backend. Please install one of the following packages for your program: " + - "SciSharp.TensorFlow.Redist, SciSharp.TensorFlow.Redist-Linux-GPU, SciSharp.TensorFlow.Redist-Windows-GPU. For more details, " + - "please visit https://github.com/SciSharp/TensorFlow.NET. If it still not work after installing the backend, please submit an " + - "issue to https://github.com/SciSharp/TensorFlow.NET/issues"); - } - } - public static string StringPiece(IntPtr handle) { return handle == IntPtr.Zero ? String.Empty : Marshal.PtrToStringAnsi(handle); diff --git a/src/TensorFlowNET.Core/tensorflow.cs b/src/TensorFlowNET.Core/tensorflow.cs index 67530ddbd..dc4e48da8 100644 --- a/src/TensorFlowNET.Core/tensorflow.cs +++ b/src/TensorFlowNET.Core/tensorflow.cs @@ -86,6 +86,18 @@ public tensorflow() OpDefLib = new OpDefLibrary(); InitGradientEnvironment(); + + try + { + var handle = c_api.TF_Version(); + } + catch (DllNotFoundException) + { + throw new RuntimeError("Tensorflow.NET cannot find a backend. Please install one of the following packages for your program: " + + "SciSharp.TensorFlow.Redist, SciSharp.TensorFlow.Redist-Linux-GPU, SciSharp.TensorFlow.Redist-Windows-GPU. For more details, " + + "please visit https://github.com/SciSharp/TensorFlow.NET. 
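The diff below removes the probe from the static c_api() constructor added in PATCH 012 and re-adds it inside the tensorflow() constructor. The commit message does not state the motivation; a plausible one is that an exception thrown from a type initializer reaches callers wrapped in TypeInitializationException, hiding both the DllNotFoundException and the guidance text. A small sketch of that behaviour, with illustrative names only:

    using System;

    static class FailsInTypeInitializer
    {
        // Simulates a P/Invoke class whose static constructor probes the native library.
        static FailsInTypeInitializer()
        {
            throw new DllNotFoundException("tensorflow");
        }

        public static void Touch() { }
    }

    class Demo
    {
        static void Main()
        {
            try
            {
                FailsInTypeInitializer.Touch();
            }
            catch (TypeInitializationException e)
            {
                // The original error is only visible as the inner exception.
                Console.WriteLine(e.InnerException?.GetType().Name); // prints DllNotFoundException
            }
        }
    }
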
If it still not work after installing the backend, please submit an " + + "issue to https://github.com/SciSharp/TensorFlow.NET/issues"); + } } public string VERSION => c_api.StringPiece(c_api.TF_Version()); diff --git a/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj b/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj index 52adf24c8..c353832ad 100644 --- a/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj +++ b/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj @@ -35,6 +35,7 @@ + diff --git a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj index 716849181..d744c3364 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj +++ b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj @@ -24,6 +24,7 @@ + diff --git a/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj b/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj index 05d1e56f3..9fec0e6d5 100644 --- a/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj +++ b/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj @@ -55,6 +55,7 @@ + diff --git a/test/TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj b/test/TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj index f52ed1e17..4c3918e4a 100644 --- a/test/TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj +++ b/test/TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj @@ -17,6 +17,7 @@ + From 516dfe715a904756c9a7d7a29e7b914aa601b161 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Tue, 16 May 2023 03:15:06 +0800 Subject: [PATCH 015/182] docs: add tf.keras badge. 
--- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 03f30d2b2..2b7eab5a4 100644 --- a/README.md +++ b/README.md @@ -5,9 +5,10 @@ [![Discord](https://img.shields.io/discord/1106946823282761851?label=Discord)](https://discord.gg/quBc2jrz) [![Join the chat at https://gitter.im/publiclab/publiclab](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/sci-sharp/community) [![CI Status](https://github.com/SciSharp/TensorFlow.NET/actions/workflows/build_and_test.yml/badge.svg)](https://github.com/SciSharp/TensorFlow.NET/actions/workflows/build_and_test.yml) -[![NuGet Badge](https://buildstats.info/nuget/TensorFlow.NET?includePreReleases=true)](https://www.nuget.org/packages/TensorFlow.NET) -[![MyGet Badge](https://img.shields.io/badge/dynamic/json?color=purple&label=nightly%20release&prefix=myget-v&query=items%5B0%5D.lower&url=https%3A%2F%2Fwww.myget.org%2FF%2Fscisharp%2Fapi%2Fv3%2Fregistration1%2Ftensorflow.net%2Findex.json)](https://www.myget.org/feed/scisharp/package/nuget/Tensorflow.NET) [![Documentation Status](https://readthedocs.org/projects/tensorflownet/badge/?version=latest)](https://tensorflownet.readthedocs.io/en/latest/?badge=latest) +[![TensorFlow.NET Badge](https://img.shields.io/nuget/v/TensorFlow.NET?label=TensorFlow.NET)](https://www.nuget.org/packages/TensorFlow.NET) +[![TensorFlow.Keras Badge](https://img.shields.io/nuget/v/TensorFlow.Keras?label=TensorFlow.Keras)](https://www.nuget.org/packages/TensorFlow.Keras) +[![MyGet Badge](https://img.shields.io/badge/dynamic/json?color=purple&label=Nightly%20Release&prefix=myget-v&query=items%5B0%5D.lower&url=https%3A%2F%2Fwww.myget.org%2FF%2Fscisharp%2Fapi%2Fv3%2Fregistration1%2Ftensorflow.net%2Findex.json)](https://www.myget.org/feed/scisharp/package/nuget/Tensorflow.NET) [![Badge](https://img.shields.io/badge/link-996.icu-red.svg)](https://996.icu/#/en_US) [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/javiercp/BinderTF.NET/master?urlpath=lab) From 8bf324add97ccedf5eb9fc8b443d1f5d00e2b621 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Wed, 17 May 2023 15:04:02 +0800 Subject: [PATCH 016/182] docs: add vote info to readme. --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index 2b7eab5a4..22b7a3b69 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,20 @@ English | [中文](docs/README-CN.md) +**=========================================================** + +### Voting: Naming Convention approach of v1.0.0 + +Dear all, + +We would like to urge you to participate in our upcoming vote regarding the naming convention for TensorFlow.NET version 1.0.0 in #1074. Your participation in the vote is essential to help us decide on the best approach for improving the naming convention used in previous versions. + +Thank you, + +TensorFlow Authors + +**=========================================================** + *master branch and v0.100.x is corresponding to tensorflow v2.10, v0.6x branch is from tensorflow v2.6, v0.15-tensorflow1.15 is from tensorflow1.15. Please add `https://www.myget.org/F/scisharp/api/v3/index.json` to nuget source to use nightly release.* From e052dfc1cdbc432c97f1a1c6ed5985508408faa0 Mon Sep 17 00:00:00 2001 From: Rinne Date: Wed, 17 May 2023 16:12:19 +0800 Subject: [PATCH 017/182] docs: update the readme. 
--- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 22b7a3b69..93b00f181 100644 --- a/README.md +++ b/README.md @@ -16,11 +16,11 @@ English | [中文](docs/README-CN.md) **=========================================================** -### Voting: Naming Convention approach of v1.0.0 +### [Voting: Naming Convention approach of v1.0.0](https://github.com/SciSharp/TensorFlow.NET/issues/1074) Dear all, -We would like to urge you to participate in our upcoming vote regarding the naming convention for TensorFlow.NET version 1.0.0 in #1074. Your participation in the vote is essential to help us decide on the best approach for improving the naming convention used in previous versions. +We would like to urge you to participate in our upcoming vote regarding the naming convention for TensorFlow.NET version 1.0.0 in [#1074](https://github.com/SciSharp/TensorFlow.NET/issues/1074). Your participation in the vote is essential to help us decide on the best approach for improving the naming convention used in previous versions. Thank you, From 80c39523a923f33b836ed413bf2f76ba19f8b9bb Mon Sep 17 00:00:00 2001 From: Rinne Date: Wed, 17 May 2023 16:13:24 +0800 Subject: [PATCH 018/182] docs: update the readme. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 93b00f181..fdf971b80 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ English | [中文](docs/README-CN.md) **=========================================================** -### [Voting: Naming Convention approach of v1.0.0](https://github.com/SciSharp/TensorFlow.NET/issues/1074) +### [Voting: Naming Convention Approach of v1.0.0](https://github.com/SciSharp/TensorFlow.NET/issues/1074) Dear all, @@ -24,7 +24,7 @@ We would like to urge you to participate in our upcoming vote regarding the nami Thank you, -TensorFlow Authors +TensorFlow.NET Authors **=========================================================** From 7d7f4e11829d27e626bcdf0276f1a16c80a93c78 Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Wed, 17 May 2023 18:06:12 +0800 Subject: [PATCH 019/182] fix: error when set the activation parameter of keras.layers.Conv2DTranspose to null. --- src/TensorFlowNET.Keras/Activations.cs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/TensorFlowNET.Keras/Activations.cs b/src/TensorFlowNET.Keras/Activations.cs index 00de728f2..d6d8e3914 100644 --- a/src/TensorFlowNET.Keras/Activations.cs +++ b/src/TensorFlowNET.Keras/Activations.cs @@ -77,6 +77,10 @@ static Activations() public Activation GetActivationFromName(string name) { + if (name == null) + { + return _linear; + } if (!_nameActivationMap.TryGetValue(name, out var res)) { throw new Exception($"Activation {name} not found"); From 25f676d6b6a94e62ed795878ef0aad655b232a0c Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Thu, 18 May 2023 19:34:47 +0800 Subject: [PATCH 020/182] ci: sync the ci with latest update. 
--- .github/workflows/build_and_test.yml | 12 ++++++------ .../TensorFlowNET.Native.UnitTest/Lite/TfLiteTest.cs | 3 +++ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 070c7cbd7..9fd34fc49 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -28,9 +28,9 @@ jobs: - name: Test CPU version run: dotnet test --no-build --verbosity normal - name: uninstall redist cpu for unit tests - run: dotnet remove helpers/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist + run: dotnet remove tools/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist - name: install redist gpu for unit tests - run: dotnet add helpers/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist-Windows-GPU + run: dotnet add tools/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist-Windows-GPU - name: Restore dependencies run: dotnet restore - name: Build GPU version @@ -52,12 +52,12 @@ jobs: run: dotnet restore - name: Build CPU version run: dotnet build --no-restore - # - name: Test CPU version - # run: dotnet test --no-build --verbosity normal + - name: Test CPU version + run: dotnet test --no-build --verbosity normal - name: uninstall redist cpu for unit tests - run: dotnet remove helpers/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist + run: dotnet remove tools/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist - name: install redist gpu for unit tests - run: dotnet add helpers/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist-Linux-GPU + run: dotnet add tools/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist-Linux-GPU - name: Restore dependencies run: dotnet restore - name: Build GPU version diff --git a/test/TensorFlowNET.Native.UnitTest/Lite/TfLiteTest.cs b/test/TensorFlowNET.Native.UnitTest/Lite/TfLiteTest.cs index e16655575..4d0d6d8c9 100644 --- a/test/TensorFlowNET.Native.UnitTest/Lite/TfLiteTest.cs +++ b/test/TensorFlowNET.Native.UnitTest/Lite/TfLiteTest.cs @@ -13,6 +13,7 @@ namespace Tensorflow.Native.UnitTest public class TfLiteTest { [TestMethod] + [Ignore] public void TfLiteVersion() { var ver = c_api_lite.StringPiece(c_api_lite.TfLiteVersion()); @@ -20,6 +21,7 @@ public void TfLiteVersion() } [TestMethod] + [Ignore] public unsafe void SmokeTest() { var model = c_api_lite.TfLiteModelCreateFromFile("Lite/testdata/add.bin"); @@ -85,6 +87,7 @@ public unsafe void SmokeTest() } [TestMethod] + [Ignore] public unsafe void QuantizationParamsTest() { var model = c_api_lite.TfLiteModelCreateFromFile("Lite/testdata/add_quantized.bin"); From c0bf8d2a6546cf1617aeb7018365852956c68318 Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Fri, 19 May 2023 00:08:03 +0800 Subject: [PATCH 021/182] fix: can't implement len for KerasShapesWrapper & Add bias implement to Conv2DTranspose.Call() --- .../Layers/Convolution/Conv2DTranspose.cs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs b/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs index 13bea627e..bbd49acd2 100644 --- a/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs +++ b/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs @@ -62,7 +62,7 @@ private static Conv2DArgs InitializeUndefinedArgs(Conv2DArgs args) public override void build(KerasShapesWrapper input_shape) { var single_shape = 
input_shape.ToSingleShape(); - if (len(input_shape) != 4) + if (len(single_shape) != 4) throw new ValueError($"Inputs should have rank 4. Received input shape: {input_shape}"); var channel_axis = _get_channel_axis(); @@ -138,7 +138,10 @@ protected override Tensors Call(Tensors inputs, Tensor state = null, bool? train } if (use_bias) - throw new NotImplementedException(""); + tf.nn.bias_add( + outputs, + bias, + data_format: conv_utils.convert_data_format(data_format, ndim: 4)); if (activation != null) return activation.Apply(outputs); From 3705dda8842e582ab3f33df36d64fedfab4a16b1 Mon Sep 17 00:00:00 2001 From: Haiping Date: Thu, 18 May 2023 20:29:12 -0500 Subject: [PATCH 022/182] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index fdf971b80..dcc58d70c 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ **TensorFlow.NET** (TF.NET) provides a .NET Standard binding for [TensorFlow](https://www.tensorflow.org/). It aims to implement the complete Tensorflow API in C# which allows .NET developers to develop, train and deploy Machine Learning models with the cross-platform .NET Standard framework. TensorFlow.NET has built-in Keras high-level interface and is released as an independent package [TensorFlow.Keras](https://www.nuget.org/packages/TensorFlow.Keras/). [![Discord](https://img.shields.io/discord/1106946823282761851?label=Discord)](https://discord.gg/quBc2jrz) +[![QQ群聊](https://img.shields.io/static/v1?label=QQ&message=群聊&color=brightgreen)](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=sN9VVMwbWjs5L0ATpizKKxOcZdEPMrp8&authKey=RLDw41bLTrEyEgZZi%2FzT4pYk%2BwmEFgFcrhs8ZbkiVY7a4JFckzJefaYNW6Lk4yPX&noverify=0&group_code=985366726) [![Join the chat at https://gitter.im/publiclab/publiclab](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/sci-sharp/community) [![CI Status](https://github.com/SciSharp/TensorFlow.NET/actions/workflows/build_and_test.yml/badge.svg)](https://github.com/SciSharp/TensorFlow.NET/actions/workflows/build_and_test.yml) [![Documentation Status](https://readthedocs.org/projects/tensorflownet/badge/?version=latest)](https://tensorflownet.readthedocs.io/en/latest/?badge=latest) From 58de537be5b643c77f887bd13f146894d32bf8f7 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Fri, 19 May 2023 16:14:35 +0800 Subject: [PATCH 023/182] fix: status null reference of base session. --- src/TensorFlowNET.Core/Sessions/BaseSession.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/TensorFlowNET.Core/Sessions/BaseSession.cs b/src/TensorFlowNET.Core/Sessions/BaseSession.cs index 0a9cfc2eb..3dab4ec71 100644 --- a/src/TensorFlowNET.Core/Sessions/BaseSession.cs +++ b/src/TensorFlowNET.Core/Sessions/BaseSession.cs @@ -30,6 +30,7 @@ public BaseSession(SafeSessionHandle handle, Graph g) { _handle = handle; _graph = g ?? ops.get_default_graph(); + _status = tf.Status; } public BaseSession(string target = "", Graph g = null, ConfigProto config = null, Status status = null) From 6fb930aa6d703231f0749de09ade31ab44c00a10 Mon Sep 17 00:00:00 2001 From: Rinne Date: Wed, 24 May 2023 00:54:10 +0800 Subject: [PATCH 024/182] docs: update discord link. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index dcc58d70c..36ec1660c 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ **TensorFlow.NET** (TF.NET) provides a .NET Standard binding for [TensorFlow](https://www.tensorflow.org/). 
It aims to implement the complete Tensorflow API in C# which allows .NET developers to develop, train and deploy Machine Learning models with the cross-platform .NET Standard framework. TensorFlow.NET has built-in Keras high-level interface and is released as an independent package [TensorFlow.Keras](https://www.nuget.org/packages/TensorFlow.Keras/). -[![Discord](https://img.shields.io/discord/1106946823282761851?label=Discord)](https://discord.gg/quBc2jrz) +[![Discord](https://img.shields.io/discord/1106946823282761851?label=Discord)](https://discord.gg/qRVm82fKTS) [![QQ群聊](https://img.shields.io/static/v1?label=QQ&message=群聊&color=brightgreen)](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=sN9VVMwbWjs5L0ATpizKKxOcZdEPMrp8&authKey=RLDw41bLTrEyEgZZi%2FzT4pYk%2BwmEFgFcrhs8ZbkiVY7a4JFckzJefaYNW6Lk4yPX&noverify=0&group_code=985366726) [![Join the chat at https://gitter.im/publiclab/publiclab](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/sci-sharp/community) [![CI Status](https://github.com/SciSharp/TensorFlow.NET/actions/workflows/build_and_test.yml/badge.svg)](https://github.com/SciSharp/TensorFlow.NET/actions/workflows/build_and_test.yml) @@ -255,7 +255,7 @@ Buy our book to make open source project be sustainable [TensorFlow.NET实战](h ### Contact -Join our chat on [Discord](https://discord.gg/quBc2jrz) or [Gitter](https://gitter.im/sci-sharp/community). +Join our chat on [Discord](https://discord.gg/qRVm82fKTS) or [Gitter](https://gitter.im/sci-sharp/community). Follow us on [Twitter](https://twitter.com/ScisharpStack), [Facebook](https://www.facebook.com/scisharp.stack.9), [Medium](https://medium.com/scisharp), [LinkedIn](https://www.linkedin.com/company/scisharp-stack/). From 4bca319eb4c67cb61453358dc1bf09f0be9a3172 Mon Sep 17 00:00:00 2001 From: AsakusaRinne Date: Thu, 25 May 2023 16:40:35 +0800 Subject: [PATCH 025/182] fix: temporarily fix the sequential nest error. --- .../Training/Saving/SavedModel/save.cs | 2 +- src/TensorFlowNET.Keras/Engine/Layer.Apply.cs | 14 +++++++++ src/TensorFlowNET.Keras/Engine/Layer.cs | 2 +- src/TensorFlowNET.Keras/Engine/Sequential.cs | 12 ++++++- .../Model/ModelBuildTest.cs | 31 +++++++++++++++++-- 5 files changed, 55 insertions(+), 6 deletions(-) diff --git a/src/TensorFlowNET.Core/Training/Saving/SavedModel/save.cs b/src/TensorFlowNET.Core/Training/Saving/SavedModel/save.cs index 4313920f5..23e0a9295 100644 --- a/src/TensorFlowNET.Core/Training/Saving/SavedModel/save.cs +++ b/src/TensorFlowNET.Core/Training/Saving/SavedModel/save.cs @@ -88,7 +88,7 @@ private static (MetaGraphDef, Graph, TrackableSaver, AssetInfo, IList { if (ops.inside_function()) { - throw new AssertionError("`tf.saved_model.save` is not supported inside a traced @tf.function. " + + throw new AssertionError("`tf.saved_model.save` is not supported inside a traced [AutoGraph]. 
" + "Move the call to the outer eagerly-executed context."); } diff --git a/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs b/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs index 7d3721f12..c04304580 100644 --- a/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs +++ b/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs @@ -41,5 +41,19 @@ public Tensors Apply(Tensors inputs, Tensor state = null, bool training = false) return outputs; } + + // TODO(Rinne): remove it and completely fix issue 1084 + [Obsolete] + private bool _enforce_layer_construction = false; + [Obsolete] + internal void enforce_layer_construction() + { + _enforce_layer_construction = true; + } + [Obsolete] + internal void unset_layer_construction() + { + _enforce_layer_construction = false; + } } } diff --git a/src/TensorFlowNET.Keras/Engine/Layer.cs b/src/TensorFlowNET.Keras/Engine/Layer.cs index 7462b1367..5942efd92 100644 --- a/src/TensorFlowNET.Keras/Engine/Layer.cs +++ b/src/TensorFlowNET.Keras/Engine/Layer.cs @@ -291,7 +291,7 @@ internal virtual void Initialize(LayerArgs args) bool _in_functional_construction_mode(Tensors inputs) { return tf.Context.executing_eagerly() - && inputs.Count(x => x is not EagerTensor && x is not NDArray) == inputs.Count(); + && inputs.Count(x => x is not EagerTensor && x is not NDArray) == inputs.Count() || _enforce_layer_construction; } public void SetConnectivityMetadata(Tensors inputs, Tensors outputs) diff --git a/src/TensorFlowNET.Keras/Engine/Sequential.cs b/src/TensorFlowNET.Keras/Engine/Sequential.cs index 90167a9d9..278747515 100644 --- a/src/TensorFlowNET.Keras/Engine/Sequential.cs +++ b/src/TensorFlowNET.Keras/Engine/Sequential.cs @@ -62,7 +62,17 @@ public void InitLayers(IEnumerable layers) { foreach(var layer in layers) { + // TODO(Rinne): remove it and completely fix issue 1084 + if(layer is Sequential s) + { + s.Layers.ForEach(x => ((Layer)x).enforce_layer_construction()); + } add(layer); + // TODO(Rinne): remove it and completely fix issue 1084 + if (layer is Sequential s2) + { + s2.Layers.ForEach(x => ((Layer)x).unset_layer_construction()); + } } } @@ -163,7 +173,7 @@ void _build_graph_network_for_inferred_shape(Shape input_shape, TF_DataType inpu Tensors layer_output = null; Tensors outputs = null; List created_nodes = new List(); - foreach (var layer in args.Layers) + foreach (var layer in Layers) { clear_previously_created_nodes(layer, _created_nodes); layer_output = layer.Apply(layer_input); diff --git a/test/TensorFlowNET.Keras.UnitTest/Model/ModelBuildTest.cs b/test/TensorFlowNET.Keras.UnitTest/Model/ModelBuildTest.cs index e1fe9ff4f..d4b11a9b2 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Model/ModelBuildTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Model/ModelBuildTest.cs @@ -1,5 +1,7 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; using static Tensorflow.Binding; +using static Tensorflow.KerasApi; namespace Tensorflow.Keras.UnitTest.Model { @@ -14,24 +16,47 @@ public void DenseBuild() var dense = tf.keras.layers.Dense(64); var output = dense.Apply(input); var model = tf.keras.Model(input, output); + model.compile(tf.keras.optimizers.Adam(), tf.keras.losses.CategoricalCrossentropy()); // one dimensions input with unknown batchsize var input_2 = tf.keras.layers.Input((60)); var dense_2 = tf.keras.layers.Dense(64); - var output_2 = dense.Apply(input_2); + var output_2 = dense_2.Apply(input_2); var model_2 = tf.keras.Model(input_2, output_2); + model_2.compile(tf.keras.optimizers.Adam(), tf.keras.losses.CategoricalCrossentropy()); // two dimensions input 
with specified batchsize var input_3 = tf.keras.layers.Input((17, 60), 8); var dense_3 = tf.keras.layers.Dense(64); - var output_3 = dense.Apply(input_3); + var output_3 = dense_3.Apply(input_3); var model_3 = tf.keras.Model(input_3, output_3); + model_3.compile(tf.keras.optimizers.Adam(), tf.keras.losses.CategoricalCrossentropy()); // one dimensions input with specified batchsize var input_4 = tf.keras.layers.Input((60), 8); var dense_4 = tf.keras.layers.Dense(64); - var output_4 = dense.Apply(input_4); + var output_4 = dense_4.Apply(input_4); var model_4 = tf.keras.Model(input_4, output_4); + model_4.compile(tf.keras.optimizers.Adam(), tf.keras.losses.CategoricalCrossentropy()); + } + + [TestMethod] + public void NestedSequential() + { + var block1 = keras.Sequential(new[] { + keras.layers.InputLayer((3, 3)), + keras.Sequential(new [] + { + keras.layers.Flatten(), + keras.layers.Dense(5) + } + ) + }); + block1.compile(tf.keras.optimizers.Adam(), tf.keras.losses.CategoricalCrossentropy()); + + var x = tf.ones((1, 3, 3)); + var y = block1.predict(x); + Console.WriteLine(y); } } } From e9f2caca573222fedec8217e4d633fdb1a769524 Mon Sep 17 00:00:00 2001 From: Luc BOLOGNA Date: Mon, 29 May 2023 19:45:34 +0200 Subject: [PATCH 026/182] Update PredictInternational on Model.Predict.cs Fix issue if data_handler.steps() > 1 --- src/TensorFlowNET.Keras/Engine/Model.Predict.cs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Keras/Engine/Model.Predict.cs b/src/TensorFlowNET.Keras/Engine/Model.Predict.cs index fc8d784ca..cbe4a7295 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Predict.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Predict.cs @@ -99,7 +99,8 @@ Tensors PredictInternal(DataHandler data_handler, int verbose) } else { - batch_outputs = tf.concat(new Tensor[] { batch_outputs, tmp_batch_outputs[0] }, axis: 0); + for (int i = 0; i < batch_outputs.Length; i++) + batch_outputs[i] = tf.concat(new Tensor[] { batch_outputs[i], tmp_batch_outputs[i] }, axis: 0); } var end_step = step + data_handler.StepIncrement; @@ -116,7 +117,7 @@ Tensors run_predict_step(OwnedIterator iterator) { var data = iterator.next(); var outputs = predict_step(data); - tf_with(ops.control_dependencies(new object[0]), ctl => _predict_counter.assign_add(1)); + tf_with(ops.control_dependencies(Array.Empty()), ctl => _predict_counter.assign_add(1)); return outputs; } From ec8bd2eb330642d39b62ce1d743ce805932ce08e Mon Sep 17 00:00:00 2001 From: Luc BOLOGNA Date: Thu, 1 Jun 2023 23:50:55 +0200 Subject: [PATCH 027/182] refacto: Standardize TensorFlowNET.Keras/Losses/ Smooth implementation --- .../Losses/BinaryCrossentropy.cs | 4 +- .../Losses/CategoricalCrossentropy.cs | 4 +- .../Losses/CosineSimilarity.cs | 40 ++++----- src/TensorFlowNET.Keras/Losses/Huber.cs | 53 +++++------ src/TensorFlowNET.Keras/Losses/LogCosh.cs | 37 ++++---- src/TensorFlowNET.Keras/Losses/Loss.cs | 90 +++++++++---------- .../Losses/LossFunctionWrapper.cs | 22 +++-- .../Losses/MeanAbsoluteError.cs | 29 +++--- .../Losses/MeanAbsolutePercentageError.cs | 31 +++---- .../Losses/MeanSquaredError.cs | 29 +++--- .../Losses/MeanSquaredLogarithmicError.cs | 49 +++++----- .../Losses/SigmoidFocalCrossEntropy.cs | 3 +- .../Losses/SparseCategoricalCrossentropy.cs | 62 ++++++------- 13 files changed, 200 insertions(+), 253 deletions(-) diff --git a/src/TensorFlowNET.Keras/Losses/BinaryCrossentropy.cs b/src/TensorFlowNET.Keras/Losses/BinaryCrossentropy.cs index ff7bb6b70..0de50a7ec 100644 --- 
a/src/TensorFlowNET.Keras/Losses/BinaryCrossentropy.cs +++ b/src/TensorFlowNET.Keras/Losses/BinaryCrossentropy.cs @@ -1,8 +1,9 @@ namespace Tensorflow.Keras.Losses; -public class BinaryCrossentropy : LossFunctionWrapper, ILossFunc +public class BinaryCrossentropy : LossFunctionWrapper { float label_smoothing; + public BinaryCrossentropy( bool from_logits = false, float label_smoothing = 0, @@ -15,7 +16,6 @@ public BinaryCrossentropy( this.label_smoothing = label_smoothing; } - public override Tensor Apply(Tensor y_true, Tensor y_pred, bool from_logits = false, int axis = -1) { var sum = keras.backend.binary_crossentropy(y_true, y_pred, from_logits: from_logits); diff --git a/src/TensorFlowNET.Keras/Losses/CategoricalCrossentropy.cs b/src/TensorFlowNET.Keras/Losses/CategoricalCrossentropy.cs index feb052244..1af57b552 100644 --- a/src/TensorFlowNET.Keras/Losses/CategoricalCrossentropy.cs +++ b/src/TensorFlowNET.Keras/Losses/CategoricalCrossentropy.cs @@ -1,8 +1,9 @@ namespace Tensorflow.Keras.Losses; -public class CategoricalCrossentropy : LossFunctionWrapper, ILossFunc +public class CategoricalCrossentropy : LossFunctionWrapper { float label_smoothing; + public CategoricalCrossentropy( bool from_logits = false, float label_smoothing = 0, @@ -15,7 +16,6 @@ public CategoricalCrossentropy( this.label_smoothing = label_smoothing; } - public override Tensor Apply(Tensor y_true, Tensor y_pred, bool from_logits = false, int axis = -1) { // Try to adjust the shape so that rank of labels = rank of logits - 1. diff --git a/src/TensorFlowNET.Keras/Losses/CosineSimilarity.cs b/src/TensorFlowNET.Keras/Losses/CosineSimilarity.cs index 16ab4b799..cf9df8d0d 100644 --- a/src/TensorFlowNET.Keras/Losses/CosineSimilarity.cs +++ b/src/TensorFlowNET.Keras/Losses/CosineSimilarity.cs @@ -1,28 +1,22 @@ -using System; -using System.Collections.Generic; -using System.Text; -using static Tensorflow.Binding; -using static Tensorflow.KerasApi; +namespace Tensorflow.Keras.Losses; -namespace Tensorflow.Keras.Losses +public class CosineSimilarity : LossFunctionWrapper { - public class CosineSimilarity : LossFunctionWrapper, ILossFunc + protected int axis = -1; + + public CosineSimilarity( + string reduction = null, + int axis = -1, + string name = null) : + base(reduction: reduction, name: name == null ? "cosine_similarity" : name) { - protected int axis=-1; - public CosineSimilarity( - string reduction = null, - int axis=-1, - string name = null) : - base(reduction: reduction, name: name == null ? 
"cosine_similarity" : name) - { - this.axis = axis; - } + this.axis = axis; + } - public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool from_logits = false, int axis = -1) - { - Tensor y_true_normalize = nn_impl.l2_normalize(y_true, axis : this.axis); - Tensor y_pred_normalize = nn_impl.l2_normalize(y_pred, axis: this.axis); - return -math_ops.reduce_sum(y_true_normalize * y_pred_normalize, axis : constant_op.constant(this.axis)); - } + public override Tensor Apply(Tensor y_true = null, Tensor y_pred = null, bool from_logits = false, int axis = -1) + { + Tensor y_true_normalize = nn_impl.l2_normalize(y_true, axis: this.axis); + Tensor y_pred_normalize = nn_impl.l2_normalize(y_pred, axis: this.axis); + return -math_ops.reduce_sum(y_true_normalize * y_pred_normalize, axis: constant_op.constant(this.axis)); } -} +} \ No newline at end of file diff --git a/src/TensorFlowNET.Keras/Losses/Huber.cs b/src/TensorFlowNET.Keras/Losses/Huber.cs index 7169ba461..61f006d2b 100644 --- a/src/TensorFlowNET.Keras/Losses/Huber.cs +++ b/src/TensorFlowNET.Keras/Losses/Huber.cs @@ -1,36 +1,29 @@ -using System; -using System.Collections.Generic; -using System.Text; -using static Tensorflow.Binding; -using static Tensorflow.KerasApi; +namespace Tensorflow.Keras.Losses; -namespace Tensorflow.Keras.Losses +public class Huber : LossFunctionWrapper { - public class Huber : LossFunctionWrapper, ILossFunc + protected Tensor delta = tf.Variable(1.0); + + public Huber( + string reduction = null, + Tensor delta = null, + string name = null) : + base(reduction: reduction, name: name == null ? "huber" : name) { - protected Tensor delta = tf.Variable(1.0) ; - public Huber ( - string reduction = null, - Tensor delta = null, - string name = null) : - base(reduction: reduction, name: name == null ? "huber" : name) - { - this.delta = delta==null? this.delta: delta; - - } + this.delta = delta == null ? 
this.delta : delta; + } - public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool from_logits = false, int axis = -1) - { - Tensor y_pred_cast = math_ops.cast(y_pred, dtype: TF_DataType.TF_FLOAT); - Tensor y_true_cast = math_ops.cast(y_true, dtype: TF_DataType.TF_FLOAT); - Tensor delta = math_ops.cast(this.delta, dtype: TF_DataType.TF_FLOAT); - Tensor error = math_ops.subtract(y_pred_cast, y_true_cast); - Tensor abs_error = math_ops.abs(error); - Tensor half = ops.convert_to_tensor(0.5, dtype: abs_error.dtype); - return gen_math_ops.mean(array_ops.where_v2(abs_error <= delta, - half * math_ops.pow(error, 2), - half * math_ops.pow(delta, 2) + delta * (abs_error - delta)), - ops.convert_to_tensor(-1)); - } + public override Tensor Apply(Tensor y_true = null, Tensor y_pred = null, bool from_logits = false, int axis = -1) + { + Tensor y_pred_cast = math_ops.cast(y_pred, dtype: TF_DataType.TF_FLOAT); + Tensor y_true_cast = math_ops.cast(y_true, dtype: TF_DataType.TF_FLOAT); + Tensor delta = math_ops.cast(this.delta, dtype: TF_DataType.TF_FLOAT); + Tensor error = math_ops.subtract(y_pred_cast, y_true_cast); + Tensor abs_error = math_ops.abs(error); + Tensor half = ops.convert_to_tensor(0.5, dtype: abs_error.dtype); + return gen_math_ops.mean(array_ops.where_v2(abs_error <= delta, + half * math_ops.pow(error, 2), + half * math_ops.pow(delta, 2) + delta * (abs_error - delta)), + ops.convert_to_tensor(-1)); } } diff --git a/src/TensorFlowNET.Keras/Losses/LogCosh.cs b/src/TensorFlowNET.Keras/Losses/LogCosh.cs index 7cfd4f67b..0c7a9b6e2 100644 --- a/src/TensorFlowNET.Keras/Losses/LogCosh.cs +++ b/src/TensorFlowNET.Keras/Losses/LogCosh.cs @@ -1,27 +1,20 @@ -using System; -using System.Collections.Generic; -using System.Text; -using Tensorflow.Operations; -using static Tensorflow.Binding; -using static Tensorflow.KerasApi; +namespace Tensorflow.Keras.Losses; -namespace Tensorflow.Keras.Losses +public class LogCosh : LossFunctionWrapper { - public class LogCosh : LossFunctionWrapper, ILossFunc - { - public LogCosh( - string reduction = null, - string name = null) : - base(reduction: reduction, name: name == null ? "log_cosh" : name){ } + public LogCosh( + string reduction = null, + string name = null) : + base(reduction: reduction, name: name == null ? 
"log_cosh" : name) + { } - public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool from_logits = false, int axis = -1) - { - Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); - Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); - Tensor x = y_pred_dispatch - y_true_cast; + public override Tensor Apply(Tensor y_true = null, Tensor y_pred = null, bool from_logits = false, int axis = -1) + { + Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); + Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); + Tensor x = y_pred_dispatch - y_true_cast; - return gen_math_ops.mean(x + gen_nn_ops.softplus(-2.0 * x) - math_ops.cast(math_ops.log(tf.Variable(2.0)), x.dtype), - ops.convert_to_tensor(-1)); - } + return gen_math_ops.mean(x + gen_nn_ops.softplus(-2.0 * x) - math_ops.cast(math_ops.log(tf.Variable(2.0)), x.dtype), + ops.convert_to_tensor(-1)); } -} +} \ No newline at end of file diff --git a/src/TensorFlowNET.Keras/Losses/Loss.cs b/src/TensorFlowNET.Keras/Losses/Loss.cs index 77bf7e1dc..ce77f6d63 100644 --- a/src/TensorFlowNET.Keras/Losses/Loss.cs +++ b/src/TensorFlowNET.Keras/Losses/Loss.cs @@ -1,55 +1,51 @@ -using System; -using Tensorflow.Keras.Utils; +using Tensorflow.Keras.Utils; -namespace Tensorflow.Keras.Losses +namespace Tensorflow.Keras.Losses; + +/// +/// Loss base class. +/// +public abstract class Loss : ILossFunc { - /// - /// Loss base class. - /// - public abstract class Loss + protected string reduction; + protected string name; + bool _allow_sum_over_batch_size; + protected bool from_logits = false; + string _name_scope; + + public string Reduction => reduction; + public string Name => name; + + public Loss(string reduction = ReductionV2.AUTO, + string name = null, + bool from_logits = false) { - protected string reduction; - protected string name; - bool _allow_sum_over_batch_size; - protected bool from_logits = false; - string _name_scope; - - public string Reduction => reduction; - public string Name => name; - public Loss(string reduction = ReductionV2.AUTO, - string name = null, - bool from_logits = false) - { - this.reduction = reduction == null ? ReductionV2.SUM_OVER_BATCH_SIZE : reduction; - this.name = name; - this.from_logits = from_logits; - _allow_sum_over_batch_size = false; - } + this.reduction = reduction == null ? 
ReductionV2.SUM_OVER_BATCH_SIZE : reduction; + this.name = name; + this.from_logits = from_logits; + _allow_sum_over_batch_size = false; + } - public virtual Tensor Apply(Tensor y_true, Tensor y_pred, bool from_logits = false, int axis = -1) - { - throw new NotImplementedException(""); - } + public abstract Tensor Apply(Tensor y_true, Tensor y_pred, bool from_logits = false, int axis = -1); - public Tensor Call(Tensor y_true, Tensor y_pred, Tensor sample_weight = null) - { - var losses = Apply(y_true, y_pred, from_logits: from_logits); - var reduction = GetReduction(); - return losses_utils.compute_weighted_loss(losses, reduction: reduction, sample_weight: sample_weight); - } + public Tensor Call(Tensor y_true, Tensor y_pred, Tensor sample_weight = null) + { + var losses = Apply(y_true, y_pred, from_logits: from_logits); + var reduction = GetReduction(); + return losses_utils.compute_weighted_loss(losses, reduction: reduction, sample_weight: sample_weight); + } - string GetReduction() - { - return reduction switch - { - ReductionV2.AUTO => ReductionV2.SUM_OVER_BATCH_SIZE, - _ => reduction - }; - } - - void _set_name_scope() + string GetReduction() + { + return reduction switch { - _name_scope = name; - } + ReductionV2.AUTO => ReductionV2.SUM_OVER_BATCH_SIZE, + _ => reduction + }; + } + + void _set_name_scope() + { + _name_scope = name; } -} +} \ No newline at end of file diff --git a/src/TensorFlowNET.Keras/Losses/LossFunctionWrapper.cs b/src/TensorFlowNET.Keras/Losses/LossFunctionWrapper.cs index 758b46f4b..f4ee2b346 100644 --- a/src/TensorFlowNET.Keras/Losses/LossFunctionWrapper.cs +++ b/src/TensorFlowNET.Keras/Losses/LossFunctionWrapper.cs @@ -1,16 +1,14 @@ using Tensorflow.Keras.Utils; -namespace Tensorflow.Keras.Losses +namespace Tensorflow.Keras.Losses; + +public abstract class LossFunctionWrapper : Loss { - public class LossFunctionWrapper : Loss - { - public LossFunctionWrapper(string reduction = ReductionV2.AUTO, - string name = null, - bool from_logits = false) - : base(reduction: reduction, - name: name, - from_logits: from_logits) - { - } - } + public LossFunctionWrapper(string reduction = ReductionV2.AUTO, + string name = null, + bool from_logits = false) + : base(reduction: reduction, + name: name, + from_logits: from_logits) + { } } diff --git a/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs b/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs index c203bc5ad..19476a68a 100644 --- a/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs +++ b/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs @@ -1,23 +1,16 @@ -using System; -using System.Collections.Generic; -using System.Text; -using static Tensorflow.Binding; -using static Tensorflow.KerasApi; +namespace Tensorflow.Keras.Losses; -namespace Tensorflow.Keras.Losses +public class MeanAbsoluteError : LossFunctionWrapper { - public class MeanAbsoluteError : LossFunctionWrapper, ILossFunc - { - public MeanAbsoluteError( - string reduction = null, - string name = null) : - base(reduction: reduction, name: name == null ? "mean_absolute_error" : name){ } + public MeanAbsoluteError( + string reduction = null, + string name = null) : + base(reduction: reduction, name: name == null ? 
"mean_absolute_error" : name){ } - public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool from_logits = false, int axis = -1) - { - Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); - Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); - return gen_math_ops.mean(math_ops.abs(y_pred_dispatch - y_true_cast), ops.convert_to_tensor(-1)); - } + public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool from_logits = false, int axis = -1) + { + Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); + Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); + return gen_math_ops.mean(math_ops.abs(y_pred_dispatch - y_true_cast), ops.convert_to_tensor(-1)); } } diff --git a/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs b/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs index 8dcaa1bcc..226c4237a 100644 --- a/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs +++ b/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs @@ -1,24 +1,17 @@ -using System; -using System.Collections.Generic; -using System.Text; -using static Tensorflow.Binding; -using static Tensorflow.KerasApi; +namespace Tensorflow.Keras.Losses; -namespace Tensorflow.Keras.Losses +public class MeanAbsolutePercentageError : LossFunctionWrapper { - public class MeanAbsolutePercentageError : LossFunctionWrapper, ILossFunc - { - public MeanAbsolutePercentageError( - string reduction = null, - string name = null) : - base(reduction: reduction, name: name == null ? "mean_absolute_percentage_error" : name){ } + public MeanAbsolutePercentageError( + string reduction = null, + string name = null) : + base(reduction: reduction, name: name == null ? "mean_absolute_percentage_error" : name){ } - public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool from_logits = false, int axis = -1) - { - Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); - Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); - Tensor diff = math_ops.abs(y_true_cast - y_pred_dispatch) / gen_math_ops.maximum(math_ops.abs(y_true_cast), gen_math_ops.cast(tf.constant(1e-7), y_pred_dispatch.dtype)); - return gen_math_ops.cast(tf.constant(100), y_pred_dispatch.dtype) * gen_math_ops.mean(diff, ops.convert_to_tensor(-1)); - } + public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool from_logits = false, int axis = -1) + { + Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); + Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); + Tensor diff = math_ops.abs(y_true_cast - y_pred_dispatch) / gen_math_ops.maximum(math_ops.abs(y_true_cast), gen_math_ops.cast(tf.constant(1e-7), y_pred_dispatch.dtype)); + return gen_math_ops.cast(tf.constant(100), y_pred_dispatch.dtype) * gen_math_ops.mean(diff, ops.convert_to_tensor(-1)); } } diff --git a/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs b/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs index 73cddef14..a937c1963 100644 --- a/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs +++ b/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs @@ -1,23 +1,16 @@ -using System; -using System.Collections.Generic; -using System.Text; -using static Tensorflow.Binding; -using static Tensorflow.KerasApi; +namespace Tensorflow.Keras.Losses; -namespace Tensorflow.Keras.Losses +public class MeanSquaredError : LossFunctionWrapper { - public class MeanSquaredError : LossFunctionWrapper, ILossFunc - { - public MeanSquaredError( - string reduction = 
null, - string name = null) : - base(reduction: reduction, name: name==null? "mean_squared_error" : name){ } + public MeanSquaredError( + string reduction = null, + string name = null) : + base(reduction: reduction, name: name==null? "mean_squared_error" : name){ } - public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool from_logits = false, int axis = -1) - { - Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); - Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); - return gen_math_ops.mean(gen_math_ops.squared_difference(y_pred_dispatch, y_true_cast), ops.convert_to_tensor(-1)); - } + public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool from_logits = false, int axis = -1) + { + Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); + Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); + return gen_math_ops.mean(gen_math_ops.squared_difference(y_pred_dispatch, y_true_cast), ops.convert_to_tensor(-1)); } } diff --git a/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs b/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs index e29659218..0a4e7d3c5 100644 --- a/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs +++ b/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs @@ -1,33 +1,28 @@ -using System; -using System.Collections.Generic; -using System.Text; -using static Tensorflow.Binding; -using static Tensorflow.KerasApi; +namespace Tensorflow.Keras.Losses; -namespace Tensorflow.Keras.Losses +public class MeanSquaredLogarithmicError : LossFunctionWrapper { - public class MeanSquaredLogarithmicError : LossFunctionWrapper, ILossFunc - { - public MeanSquaredLogarithmicError( - string reduction = null, - string name = null) : - base(reduction: reduction, name: name == null ? "mean_squared_logarithmic_error" : name){ } - + public MeanSquaredLogarithmicError( + string reduction = null, + string name = null) : + base(reduction: reduction, name: name == null ? 
"mean_squared_logarithmic_error" : name) + { } - public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool from_logits = false, int axis = -1) + public override Tensor Apply(Tensor y_true = null, Tensor y_pred = null, bool from_logits = false, int axis = -1) + { + Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); + Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); + Tensor first_log = null, second_log = null; + if (y_pred_dispatch.dtype == TF_DataType.TF_DOUBLE) + { + first_log = math_ops.log(math_ops.maximum(y_pred_dispatch, 1e-7) + 1.0); + second_log = math_ops.log(math_ops.maximum(y_true_cast, 1e-7) + 1.0); + } + else { - Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); - Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); - Tensor first_log=null, second_log=null; - if (y_pred_dispatch.dtype == TF_DataType.TF_DOUBLE) { - first_log = math_ops.log(math_ops.maximum(y_pred_dispatch, 1e-7) + 1.0); - second_log = math_ops.log(math_ops.maximum(y_true_cast, 1e-7) + 1.0); - } - else { - first_log = math_ops.log(math_ops.maximum(y_pred_dispatch, 1e-7f) + 1.0f); - second_log = math_ops.log(math_ops.maximum(y_true_cast, 1e-7f) + 1.0f); - } - return gen_math_ops.mean(gen_math_ops.squared_difference(first_log, second_log), ops.convert_to_tensor(-1)); + first_log = math_ops.log(math_ops.maximum(y_pred_dispatch, 1e-7f) + 1.0f); + second_log = math_ops.log(math_ops.maximum(y_true_cast, 1e-7f) + 1.0f); } + return gen_math_ops.mean(gen_math_ops.squared_difference(first_log, second_log), ops.convert_to_tensor(-1)); } -} +} \ No newline at end of file diff --git a/src/TensorFlowNET.Keras/Losses/SigmoidFocalCrossEntropy.cs b/src/TensorFlowNET.Keras/Losses/SigmoidFocalCrossEntropy.cs index 7ac3fa0bb..ec6dcedf8 100644 --- a/src/TensorFlowNET.Keras/Losses/SigmoidFocalCrossEntropy.cs +++ b/src/TensorFlowNET.Keras/Losses/SigmoidFocalCrossEntropy.cs @@ -2,7 +2,7 @@ namespace Tensorflow.Keras.Losses; -public class SigmoidFocalCrossEntropy : LossFunctionWrapper, ILossFunc +public class SigmoidFocalCrossEntropy : LossFunctionWrapper { float _alpha; float _gamma; @@ -20,7 +20,6 @@ public SigmoidFocalCrossEntropy(bool from_logits = false, _gamma = gamma; } - public override Tensor Apply(Tensor y_true, Tensor y_pred, bool from_logits = false, int axis = -1) { y_true = tf.cast(y_true, dtype: y_pred.dtype); diff --git a/src/TensorFlowNET.Keras/Losses/SparseCategoricalCrossentropy.cs b/src/TensorFlowNET.Keras/Losses/SparseCategoricalCrossentropy.cs index 4e2790ab1..17ce2d30b 100644 --- a/src/TensorFlowNET.Keras/Losses/SparseCategoricalCrossentropy.cs +++ b/src/TensorFlowNET.Keras/Losses/SparseCategoricalCrossentropy.cs @@ -1,41 +1,41 @@ using static Tensorflow.Binding; -namespace Tensorflow.Keras.Losses +namespace Tensorflow.Keras.Losses; + +public class SparseCategoricalCrossentropy : LossFunctionWrapper { - public class SparseCategoricalCrossentropy : LossFunctionWrapper, ILossFunc + private bool _from_logits = false; + + public SparseCategoricalCrossentropy( + bool from_logits = false, + string reduction = null, + string name = null) : + base(reduction: reduction, name: name == null ? 
"sparse_categorical_crossentropy" : name) + { + _from_logits = from_logits; + } + + public override Tensor Apply(Tensor target, Tensor output, bool from_logits = false, int axis = -1) { - private bool _from_logits = false; - public SparseCategoricalCrossentropy( - bool from_logits = false, - string reduction = null, - string name = null) : - base(reduction: reduction, name: name == null ? "sparse_categorical_crossentropy" : name) + target = tf.cast(target, dtype: TF_DataType.TF_INT64); + + if (!_from_logits) { - _from_logits = from_logits; + var epsilon = tf.constant(KerasApi.keras.backend.epsilon(), output.dtype); + output = tf.clip_by_value(output, epsilon, 1 - epsilon); + output = tf.log(output); } - public override Tensor Apply(Tensor target, Tensor output, bool from_logits = false, int axis = -1) + // Try to adjust the shape so that rank of labels = rank of logits - 1. + var output_shape = array_ops.shape_v2(output); + var output_rank = output.shape.ndim; + var target_rank = target.shape.ndim; + var update_shape = target_rank != output_rank - 1; + if (update_shape) { - target = tf.cast(target, dtype: TF_DataType.TF_INT64); - - if (!_from_logits) - { - var epsilon = tf.constant(KerasApi.keras.backend.epsilon(), output.dtype); - output = tf.clip_by_value(output, epsilon, 1 - epsilon); - output = tf.log(output); - } - - // Try to adjust the shape so that rank of labels = rank of logits - 1. - var output_shape = array_ops.shape_v2(output); - var output_rank = output.shape.ndim; - var target_rank = target.shape.ndim; - var update_shape = target_rank != output_rank - 1; - if (update_shape) - { - target = array_ops.reshape(target, new int[] { -1 }); - output = array_ops.reshape(output, new int[] { -1, output_shape[-1].numpy() }); - } - return tf.nn.sparse_softmax_cross_entropy_with_logits(target, output); + target = array_ops.reshape(target, new int[] { -1 }); + output = array_ops.reshape(output, new int[] { -1, output_shape[-1].numpy() }); } + return tf.nn.sparse_softmax_cross_entropy_with_logits(target, output); } -} +} \ No newline at end of file From 3474a8565f8970416faf90542f88421cfd1b90bd Mon Sep 17 00:00:00 2001 From: RayWang <75263275+RayWang-iat@users.noreply.github.com> Date: Fri, 2 Jun 2023 23:16:38 +0800 Subject: [PATCH 028/182] Update Numpy.Math.cs --- src/TensorFlowNET.Core/NumPy/Numpy.Math.cs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs b/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs index 0e50cd564..ea85048f8 100644 --- a/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs +++ b/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs @@ -28,7 +28,16 @@ public partial class np public static NDArray multiply(NDArray x1, NDArray x2) => new NDArray(tf.multiply(x1, x2)); [AutoNumPy] - public static NDArray maximum(NDArray x1, NDArray x2) => new NDArray(tf.maximum(x1, x2)); + //public static NDArray maximum(NDArray x1, NDArray x2) => new NDArray(tf.maximum(x1, x2)); + public static NDArray maximum(NDArray x1, NDArray x2, int? 
axis = null) + { + var maxValues = tf.maximum(x1, x2); + if (axis.HasValue) + { + maxValues = tf.reduce_max(maxValues, axis: axis.Value); + } + return new NDArray(maxValues); + } [AutoNumPy] public static NDArray minimum(NDArray x1, NDArray x2) => new NDArray(tf.minimum(x1, x2)); From 94edda54cdfdddda889a1ce544d91e9d3e189481 Mon Sep 17 00:00:00 2001 From: RayWang <75263275+RayWang-iat@users.noreply.github.com> Date: Fri, 2 Jun 2023 23:23:33 +0800 Subject: [PATCH 029/182] Update Math.Test.cs --- test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs b/test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs index a0e6fa4ec..6e00504b8 100644 --- a/test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs +++ b/test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs @@ -65,5 +65,17 @@ public void power() var y = np.power(x, 3); Assert.AreEqual(y, new[] { 0, 1, 8, 27, 64, 125 }); } + [TestMethod] + public void maximum() + { + var x1 = new NDArray(new[,] { { 1, 2, 3 }, { 4, 5.1, 6 } }); + var x2 = new NDArray(new[,] { { 3, 2, 1 }, { 6, 5.1, 4 } }); + var y = np.maximum(x1,x2); + var y1 = np.maximum(x1, x2, axis: 0); + var y2 = new NDArray(new[,] { { 3, 2, 3 }, { 6, 5.1, 6 } }); + var y3 = new NDArray(new[] { 6, 5.1, 6 }); + Assert.AreEqual(y, y2); + Assert.AreEqual(y1, y3); + } } } From f45b35b4cf43b73207905b350129d55144b17bd6 Mon Sep 17 00:00:00 2001 From: RayWang <75263275+RayWang-iat@users.noreply.github.com> Date: Mon, 5 Jun 2023 11:26:06 +0800 Subject: [PATCH 030/182] Update Math.Test.cs --- test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs b/test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs index 6e00504b8..32b517e4f 100644 --- a/test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs +++ b/test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs @@ -66,16 +66,19 @@ public void power() Assert.AreEqual(y, new[] { 0, 1, 8, 27, 64, 125 }); } [TestMethod] - public void maximum() + public void maximum() { var x1 = new NDArray(new[,] { { 1, 2, 3 }, { 4, 5.1, 6 } }); var x2 = new NDArray(new[,] { { 3, 2, 1 }, { 6, 5.1, 4 } }); - var y = np.maximum(x1,x2); + var y0 = np.maximum(x1,x2); var y1 = np.maximum(x1, x2, axis: 0); - var y2 = new NDArray(new[,] { { 3, 2, 3 }, { 6, 5.1, 6 } }); - var y3 = new NDArray(new[] { 6, 5.1, 6 }); - Assert.AreEqual(y, y2); - Assert.AreEqual(y1, y3); + var y2 = np.maximum(x1, x2, axis: 1); + var y3 = new NDArray(new[,] { { 3, 2, 3 }, { 6, 5.1, 6 } }); + var y4 = new NDArray(new[] { 6, 5.1, 6 }); + var y5 = new NDArray(new[] { 3.0, 6 }); + Assert.AreEqual(y0, y3); + Assert.AreEqual(y1, y4); + Assert.AreEqual(y2, y5); } } } From 46e190dbfc871ce4dd780d58d888d6406cc0285e Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Tue, 6 Jun 2023 11:12:49 +0800 Subject: [PATCH 031/182] feat: add RNN basic framework. 
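The loss refactor in PATCH 027 above makes Loss.Apply abstract and turns LossFunctionWrapper into an abstract base, so a concrete loss only has to override Apply while reduction and sample weighting remain in Loss.Call. A minimal sketch of a user-defined loss under the refactored classes; the class name and the quartic formula are invented for illustration, while the helper calls mirror the built-in losses shown in the patch.

```csharp
using Tensorflow;
using Tensorflow.Keras.Losses;

// Custom loss on top of the now-abstract LossFunctionWrapper: only Apply is overridden.
// "MeanQuarticError" and its formula are illustrative, not part of the patch.
public class MeanQuarticError : LossFunctionWrapper
{
    public MeanQuarticError(string reduction = null, string name = null)
        : base(reduction: reduction, name: name ?? "mean_quartic_error") { }

    public override Tensor Apply(Tensor y_true, Tensor y_pred, bool from_logits = false, int axis = -1)
    {
        Tensor y_pred_cast = ops.convert_to_tensor(y_pred);
        Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_cast.dtype);
        Tensor error = y_pred_cast - y_true_cast;
        // mean of the 4th power of the error over the last axis, like the built-in losses
        return gen_math_ops.mean(math_ops.pow(error, 4), ops.convert_to_tensor(-1));
    }
}
```

Because Loss now implements ILossFunc directly, an instance of such a class can be passed to model.compile in the same way as the built-in losses.
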
--- .../Extensions/DictionaryExtension.cs | 0 .../Extensions/JObjectExtensions.cs | 6 +- .../Common/Extensions/LinqExtensions.cs | 26 + .../{ => Common}/Extensions/OneofExtension.cs | 0 .../Common/Types/GeneralizedTensorShape.cs | 79 +++ .../Common/Types/IOptionalArgs.cs | 21 + .../Types}/NamedTuple.cs | 0 .../Types}/TensorShapeConfig.cs | 2 +- .../Keras/ArgsDefinition/Rnn/RNNArgs.cs | 11 +- .../ArgsDefinition/Rnn/RnnOptionalArgs.cs | 14 + .../ArgsDefinition/Rnn/SimpleRNNCellArgs.cs | 29 + src/TensorFlowNET.Core/Keras/Layers/ILayer.cs | 5 +- .../Keras/Layers/Rnn/IRnnCell.cs | 19 + .../Keras/Layers/Rnn/IStackedRnnCells.cs | 12 + ...stomizedKerasShapesWrapperJsonConverter.cs | 1 + .../Keras/Saving/KerasShapesWrapper.cs | 1 + src/TensorFlowNET.Core/NumPy/Axis.cs | 5 - .../Operations/Initializers/Orthogonal.cs | 2 +- .../Operations/NnOps/BasicLSTMCell.cs | 1 + .../Operations/NnOps/BasicRNNCell.cs | 1 + .../Operations/NnOps/LayerRNNCell.cs | 1 + .../Operations/NnOps/RNNCell.cs | 15 +- .../Operations/logging_ops.cs | 2 +- src/TensorFlowNET.Core/Operations/sort_ops.cs | 2 +- .../Tensorflow.Binding.csproj | 5 + src/TensorFlowNET.Core/Tensors/Tensors.cs | 40 +- src/TensorFlowNET.Core/Util/nest.py.cs | 33 + src/TensorFlowNET.Keras/BackendImpl.cs | 510 ++++++++++++++++ src/TensorFlowNET.Keras/Engine/Functional.cs | 5 +- src/TensorFlowNET.Keras/Engine/Layer.Apply.cs | 7 +- src/TensorFlowNET.Keras/Engine/Layer.cs | 4 +- src/TensorFlowNET.Keras/Engine/Model.cs | 2 +- src/TensorFlowNET.Keras/Engine/Sequential.cs | 3 +- .../Layers/Activation/ELU.cs | 3 +- .../Layers/Activation/Exponential.cs | 4 +- .../Layers/Activation/HardSigmoid.cs | 3 +- .../Layers/Activation/LeakyReLu.cs | 3 +- .../Layers/Activation/SELU.cs | 3 +- .../Layers/Activation/Softmax.cs | 5 +- .../Layers/Activation/Softplus.cs | 3 +- .../Layers/Activation/Softsign.cs | 3 +- .../Layers/Activation/Swish.cs | 3 +- .../Layers/Activation/Tanh.cs | 3 +- .../Layers/Attention/BaseDenseAttention.cs | 3 +- .../Layers/Attention/MultiHeadAttention.cs | 5 +- .../Layers/Convolution/Conv2DTranspose.cs | 3 +- .../Layers/Convolution/Convolutional.cs | 3 +- src/TensorFlowNET.Keras/Layers/Core/Dense.cs | 3 +- .../Layers/Core/EinsumDense.cs | 3 +- .../Layers/Core/Embedding.cs | 3 +- .../Layers/Merging/Merge.cs | 3 +- .../Normalization/BatchNormalization.cs | 3 +- .../Normalization/LayerNormalization.cs | 3 +- .../Layers/Normalization/Normalization.cs | 3 +- .../Layers/Pooling/GlobalAveragePooling1D.cs | 3 +- .../Layers/Pooling/GlobalAveragePooling2D.cs | 3 +- .../Layers/Pooling/GlobalMaxPooling1D.cs | 3 +- .../Layers/Pooling/GlobalMaxPooling2D.cs | 3 +- .../Layers/Pooling/Pooling1D.cs | 3 +- .../Layers/Pooling/Pooling2D.cs | 3 +- .../Layers/Preprocessing/CategoryEncoding.cs | 4 +- .../Layers/Preprocessing/Rescaling.cs | 3 +- .../Layers/Preprocessing/Resizing.cs | 3 +- .../Layers/Regularization/Dropout.cs | 5 +- .../Layers/Reshaping/Cropping1D.cs | 4 +- .../Layers/Reshaping/Cropping2D.cs | 3 +- .../Layers/Reshaping/Cropping3D.cs | 3 +- .../Layers/Reshaping/Flatten.cs | 3 +- .../Layers/Reshaping/Permute.cs | 3 +- .../Layers/Reshaping/Reshape.cs | 3 +- .../Layers/Reshaping/UpSampling2D.cs | 3 +- .../Layers/Reshaping/ZeroPadding2D.cs | 3 +- .../Layers/Rnn/DropoutRNNCellMixin.cs | 85 +++ src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs | 5 +- src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs | 569 +++++++++++++++--- src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs | 13 + .../Layers/Rnn/RnnCellBase.cs | 24 + .../Layers/Rnn/SimpleRNN.cs | 22 +- .../Layers/Rnn/SimpleRNNCell.cs | 113 
+++- .../Layers/Rnn/StackedRNNCells.cs | 13 +- .../Layers/TensorFlowOpLayer.cs | 3 +- .../Metrics/metrics_utils.cs | 2 +- ...processing.image_dataset_from_directory.cs | 2 +- .../Saving/KerasObjectLoader.cs | 2 +- src/TensorFlowNET.Keras/Utils/RnnUtils.cs | 93 +++ .../Layers/LayersTest.cs | 11 - .../Layers/Rnn.Test.cs | 28 + tools/TensorFlowNET.Console/SimpleRnnTest.cs | 2 +- 88 files changed, 1789 insertions(+), 188 deletions(-) rename src/TensorFlowNET.Core/{ => Common}/Extensions/DictionaryExtension.cs (100%) rename src/TensorFlowNET.Core/{ => Common}/Extensions/JObjectExtensions.cs (80%) create mode 100644 src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs rename src/TensorFlowNET.Core/{ => Common}/Extensions/OneofExtension.cs (100%) create mode 100644 src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs create mode 100644 src/TensorFlowNET.Core/Common/Types/IOptionalArgs.cs rename src/TensorFlowNET.Core/{Extensions => Common/Types}/NamedTuple.cs (100%) rename src/TensorFlowNET.Core/{Keras/Saving => Common/Types}/TensorShapeConfig.cs (95%) create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RnnOptionalArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs create mode 100644 src/TensorFlowNET.Core/Keras/Layers/Rnn/IStackedRnnCells.cs create mode 100644 src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs create mode 100644 src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs create mode 100644 src/TensorFlowNET.Keras/Layers/Rnn/RnnCellBase.cs create mode 100644 src/TensorFlowNET.Keras/Utils/RnnUtils.cs create mode 100644 test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs diff --git a/src/TensorFlowNET.Core/Extensions/DictionaryExtension.cs b/src/TensorFlowNET.Core/Common/Extensions/DictionaryExtension.cs similarity index 100% rename from src/TensorFlowNET.Core/Extensions/DictionaryExtension.cs rename to src/TensorFlowNET.Core/Common/Extensions/DictionaryExtension.cs diff --git a/src/TensorFlowNET.Core/Extensions/JObjectExtensions.cs b/src/TensorFlowNET.Core/Common/Extensions/JObjectExtensions.cs similarity index 80% rename from src/TensorFlowNET.Core/Extensions/JObjectExtensions.cs rename to src/TensorFlowNET.Core/Common/Extensions/JObjectExtensions.cs index 2e758dbf1..6ceba445a 100644 --- a/src/TensorFlowNET.Core/Extensions/JObjectExtensions.cs +++ b/src/TensorFlowNET.Core/Common/Extensions/JObjectExtensions.cs @@ -3,16 +3,16 @@ using System.Collections.Generic; using System.Text; -namespace Tensorflow.Extensions +namespace Tensorflow.Common.Extensions { public static class JObjectExtensions { public static T? 
TryGetOrReturnNull(this JObject obj, string key) { var res = obj[key]; - if(res is null) + if (res is null) { - return default(T); + return default; } else { diff --git a/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs b/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs new file mode 100644 index 000000000..0402fca03 --- /dev/null +++ b/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs @@ -0,0 +1,26 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; + +namespace Tensorflow.Common.Extensions +{ + public static class LinqExtensions + { +#if NETSTANDARD2_0 + public static IEnumerable TakeLast(this IEnumerable sequence, int count) + { + return sequence.Skip(sequence.Count() - count); + } + + public static IEnumerable SkipLast(this IEnumerable sequence, int count) + { + return sequence.Take(sequence.Count() - count); + } +#endif + public static Tensors ToTensors(this IEnumerable tensors) + { + return new Tensors(tensors); + } + } +} diff --git a/src/TensorFlowNET.Core/Extensions/OneofExtension.cs b/src/TensorFlowNET.Core/Common/Extensions/OneofExtension.cs similarity index 100% rename from src/TensorFlowNET.Core/Extensions/OneofExtension.cs rename to src/TensorFlowNET.Core/Common/Extensions/OneofExtension.cs diff --git a/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs b/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs new file mode 100644 index 000000000..edb9a802f --- /dev/null +++ b/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs @@ -0,0 +1,79 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; + +namespace Tensorflow.Common.Types +{ + public class GeneralizedTensorShape: IEnumerable + { + public TensorShapeConfig[] Shapes { get; set; } + /// + /// create a single-dim generalized Tensor shape. + /// + /// + public GeneralizedTensorShape(int dim) + { + Shapes = new TensorShapeConfig[] { new TensorShapeConfig() { Items = new long?[] { dim } } }; + } + + public GeneralizedTensorShape(Shape shape) + { + Shapes = new TensorShapeConfig[] { shape }; + } + + public GeneralizedTensorShape(TensorShapeConfig shape) + { + Shapes = new TensorShapeConfig[] { shape }; + } + + public GeneralizedTensorShape(TensorShapeConfig[] shapes) + { + Shapes = shapes; + } + + public GeneralizedTensorShape(IEnumerable shape) + { + Shapes = shape.Select(x => (TensorShapeConfig)x).ToArray(); + } + + public Shape ToSingleShape() + { + if (Shapes.Length != 1) + { + throw new ValueError("The generalized shape contains more than 1 dim."); + } + var shape_config = Shapes[0]; + Debug.Assert(shape_config is not null); + return new Shape(shape_config.Items.Select(x => x is null ? -1 : x.Value).ToArray()); + } + + public long ToNumber() + { + if(Shapes.Length != 1 || Shapes[0].Items.Length != 1) + { + throw new ValueError("The generalized shape contains more than 1 dim."); + } + var res = Shapes[0].Items[0]; + return res is null ? -1 : res.Value; + } + + public Shape[] ToShapeArray() + { + return Shapes.Select(x => new Shape(x.Items.Select(y => y is null ? 
-1 : y.Value).ToArray())).ToArray(); + } + + public IEnumerator GetEnumerator() + { + foreach (var shape in Shapes) + { + yield return shape.Items; + } + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + } +} diff --git a/src/TensorFlowNET.Core/Common/Types/IOptionalArgs.cs b/src/TensorFlowNET.Core/Common/Types/IOptionalArgs.cs new file mode 100644 index 000000000..427e71aaa --- /dev/null +++ b/src/TensorFlowNET.Core/Common/Types/IOptionalArgs.cs @@ -0,0 +1,21 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Common.Types +{ + /// + /// This interface is used when some corresponding python methods have optional args. + /// For example, `Keras.Layer.Apply` generally takes three args as the inputs, while + /// `Keras.Layer.RNN` takes more. Then when calling RNN, you should add `RnnOptionalArgs` + /// as the parameter of the method. + /// + public interface IOptionalArgs + { + /// + /// The identifier of the class. It is not an argument but only something to + /// separate different OptionalArgs. + /// + string Identifier { get; } + } +} diff --git a/src/TensorFlowNET.Core/Extensions/NamedTuple.cs b/src/TensorFlowNET.Core/Common/Types/NamedTuple.cs similarity index 100% rename from src/TensorFlowNET.Core/Extensions/NamedTuple.cs rename to src/TensorFlowNET.Core/Common/Types/NamedTuple.cs diff --git a/src/TensorFlowNET.Core/Keras/Saving/TensorShapeConfig.cs b/src/TensorFlowNET.Core/Common/Types/TensorShapeConfig.cs similarity index 95% rename from src/TensorFlowNET.Core/Keras/Saving/TensorShapeConfig.cs rename to src/TensorFlowNET.Core/Common/Types/TensorShapeConfig.cs index 7abcfde26..a36930eca 100644 --- a/src/TensorFlowNET.Core/Keras/Saving/TensorShapeConfig.cs +++ b/src/TensorFlowNET.Core/Common/Types/TensorShapeConfig.cs @@ -3,7 +3,7 @@ using System.Collections.Generic; using System.Linq; -namespace Tensorflow.Keras.Saving +namespace Tensorflow.Common.Types { public class TensorShapeConfig { diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs index 2585592c1..ed5a1d6dd 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs @@ -1,17 +1,15 @@ using Newtonsoft.Json; using System.Collections.Generic; +using Tensorflow.Keras.Layers.Rnn; namespace Tensorflow.Keras.ArgsDefinition.Rnn { + // TODO(Rinne): add regularizers. public class RNNArgs : AutoSerializeLayerArgs { - public interface IRnnArgCell : ILayer - { - object state_size { get; } - } [JsonProperty("cell")] // TODO: the cell should be serialized with `serialize_keras_object`. 
- public IRnnArgCell Cell { get; set; } = null; + public IRnnCell Cell { get; set; } = null; [JsonProperty("return_sequences")] public bool ReturnSequences { get; set; } = false; [JsonProperty("return_state")] @@ -34,6 +32,9 @@ public interface IRnnArgCell : ILayer public IInitializer KernelInitializer { get; set; } public IInitializer RecurrentInitializer { get; set; } public IInitializer BiasInitializer { get; set; } + public float Dropout { get; set; } = .0f; + public bool ZeroOutputForMask { get; set; } = false; + public float RecurrentDropout { get; set; } = .0f; // kernel_regularizer=None, // recurrent_regularizer=None, diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RnnOptionalArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RnnOptionalArgs.cs new file mode 100644 index 000000000..64b500bba --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RnnOptionalArgs.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Common.Types; + +namespace Tensorflow.Keras.ArgsDefinition.Rnn +{ + public class RnnOptionalArgs: IOptionalArgs + { + public string Identifier => "Rnn"; + public Tensor Mask { get; set; } = null; + public Tensors Constants { get; set; } = null; + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs new file mode 100644 index 000000000..1dfcbe9cf --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs @@ -0,0 +1,29 @@ +using Newtonsoft.Json; +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition.Rnn +{ + public class SimpleRNNCellArgs: AutoSerializeLayerArgs + { + [JsonProperty("units")] + public int Units { get; set; } + // TODO(Rinne): lack of initialized value of Activation. Merging keras + // into tf.net could resolve it. + [JsonProperty("activation")] + public Activation Activation { get; set; } + [JsonProperty("use_bias")] + public bool UseBias { get; set; } = true; + [JsonProperty("dropout")] + public float Dropout { get; set; } = .0f; + [JsonProperty("recurrent_dropout")] + public float RecurrentDropout { get; set; } = .0f; + [JsonProperty("kernel_initializer")] + public IInitializer KernelInitializer { get; set; } + [JsonProperty("recurrent_initializer")] + public IInitializer RecurrentInitializer { get; set; } + [JsonProperty("bias_initializer")] + public IInitializer BiasInitializer { get; set; } + } +} diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs index f76693945..e94c8bf10 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs @@ -1,4 +1,5 @@ -using Tensorflow.Keras.Engine; +using Tensorflow.Common.Types; +using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; using Tensorflow.NumPy; using Tensorflow.Training; @@ -14,7 +15,7 @@ public interface ILayer: IWithTrackable, IKerasConfigable List Layers { get; } List InboundNodes { get; } List OutboundNodes { get; } - Tensors Apply(Tensors inputs, Tensor state = null, bool training = false); + Tensors Apply(Tensors inputs, Tensors states = null, bool training = false, IOptionalArgs? 
optional_args = null); List TrainableVariables { get; } List TrainableWeights { get; } List NonTrainableWeights { get; } diff --git a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs new file mode 100644 index 000000000..df6222cd0 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs @@ -0,0 +1,19 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Common.Types; + +namespace Tensorflow.Keras.Layers.Rnn +{ + public interface IRnnCell: ILayer + { + GeneralizedTensorShape StateSize { get; } + GeneralizedTensorShape OutputSize { get; } + /// + /// Whether the optional RNN args are supported when appying the layer. + /// In other words, whether `Apply` is overwrited with process of `RnnOptionalArgs`. + /// + bool SupportOptionalArgs { get; } + (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? training = null); + } +} diff --git a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IStackedRnnCells.cs b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IStackedRnnCells.cs new file mode 100644 index 000000000..e73244a51 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IStackedRnnCells.cs @@ -0,0 +1,12 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.Rnn +{ + public interface IStackedRnnCells : IRnnCell + { + int Count { get; } + IRnnCell this[int idx] { get; } + } +} diff --git a/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedKerasShapesWrapperJsonConverter.cs b/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedKerasShapesWrapperJsonConverter.cs index 1a4245bf2..3a21db9d2 100644 --- a/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedKerasShapesWrapperJsonConverter.cs +++ b/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedKerasShapesWrapperJsonConverter.cs @@ -3,6 +3,7 @@ using System; using System.Collections.Generic; using System.Text; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Saving.Json { diff --git a/src/TensorFlowNET.Core/Keras/Saving/KerasShapesWrapper.cs b/src/TensorFlowNET.Core/Keras/Saving/KerasShapesWrapper.cs index d91d3161d..ea6fe976f 100644 --- a/src/TensorFlowNET.Core/Keras/Saving/KerasShapesWrapper.cs +++ b/src/TensorFlowNET.Core/Keras/Saving/KerasShapesWrapper.cs @@ -6,6 +6,7 @@ using System.Diagnostics; using OneOf.Types; using Tensorflow.Keras.Saving.Json; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Saving { diff --git a/src/TensorFlowNET.Core/NumPy/Axis.cs b/src/TensorFlowNET.Core/NumPy/Axis.cs index 976c764f2..7a3ecbf10 100644 --- a/src/TensorFlowNET.Core/NumPy/Axis.cs +++ b/src/TensorFlowNET.Core/NumPy/Axis.cs @@ -74,8 +74,3 @@ public override string ToString() => IsScalar ? 
$"{axis[0]}" : $"({string.Join(", ", axis)})"; } } - -namespace System.Runtime.CompilerServices -{ - internal static class IsExternalInit { } -} diff --git a/src/TensorFlowNET.Core/Operations/Initializers/Orthogonal.cs b/src/TensorFlowNET.Core/Operations/Initializers/Orthogonal.cs index 492047c9f..88673bb5e 100644 --- a/src/TensorFlowNET.Core/Operations/Initializers/Orthogonal.cs +++ b/src/TensorFlowNET.Core/Operations/Initializers/Orthogonal.cs @@ -53,7 +53,7 @@ private Tensor _generate_init_val(Shape shape, TF_DataType dtype) // Compute the qr factorization var (q, r) = tf.linalg.qr(a, full_matrices: false); // Make Q uniform - var d = tf.linalg.tensor_diag_part(r); + var d = tf.linalg.tensor_diag_part(r.Single); q *= tf.sign(d); if (num_rows < num_cols) diff --git a/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs index d3592514d..b2cda952e 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs @@ -11,6 +11,7 @@ namespace Tensorflow /// Basic LSTM recurrent network cell. /// The implementation is based on: http://arxiv.org/abs/1409.2329. /// + [Obsolete("This is an incompleted tf v1 api, pleas use keras RNNs instead.")] public class BasicLstmCell : LayerRnnCell { int _num_units; diff --git a/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs index 17d51363f..3308aebb7 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs @@ -20,6 +20,7 @@ limitations under the License. namespace Tensorflow { + [Obsolete("This is an incompleted tf v1 api, pleas use keras RNNs instead.")] public class BasicRnnCell : LayerRnnCell { int _num_units; diff --git a/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs index 7394cb7f9..65de4fe90 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs @@ -19,6 +19,7 @@ limitations under the License. namespace Tensorflow { + [Obsolete("This is an incompleted tf v1 api, pleas use keras RNNs instead.")] public class LayerRnnCell : RnnCell { protected InputSpec inputSpec; diff --git a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs index ecc9ca116..71fdc301d 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs @@ -16,10 +16,12 @@ limitations under the License. using System; using System.Collections.Generic; +using Tensorflow.Common.Types; using Tensorflow.Keras; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; +using Tensorflow.Keras.Layers.Rnn; using Tensorflow.Keras.Saving; using Tensorflow.NumPy; using Tensorflow.Operations; @@ -50,7 +52,8 @@ namespace Tensorflow /// matching structure of Tensors having shape `[batch_size].concatenate(s)` /// for each `s` in `self.batch_size`. 
/// - public abstract class RnnCell : ILayer, RNNArgs.IRnnArgCell + [Obsolete("This is an incompleted tf v1 api, pleas use keras RNNs instead.")] + public abstract class RnnCell : ILayer, IRnnCell { /// /// Attribute that indicates whether the cell is a TF RNN cell, due the slight @@ -142,7 +145,7 @@ private Tensor _zero_state_tensors(object state_size, Tensor batch_size, TF_Data throw new NotImplementedException("_zero_state_tensors"); } - public Tensors Apply(Tensors inputs, Tensor state = null, bool is_training = false) + public Tensors Apply(Tensors inputs, Tensors state = null, bool is_training = false, IOptionalArgs? optional_args = null) { throw new NotImplementedException(); } @@ -173,5 +176,13 @@ public void adapt(Tensor data, int? batch_size = null, int? steps = null) { throw new NotImplementedException(); } + + public (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? training = null) + { + throw new NotImplementedException(); + } + public GeneralizedTensorShape StateSize => throw new NotImplementedException(); + public GeneralizedTensorShape OutputSize => throw new NotImplementedException(); + public bool SupportOptionalArgs => throw new NotImplementedException(); } } diff --git a/src/TensorFlowNET.Core/Operations/logging_ops.cs b/src/TensorFlowNET.Core/Operations/logging_ops.cs index e38e60b5b..3303cadc3 100644 --- a/src/TensorFlowNET.Core/Operations/logging_ops.cs +++ b/src/TensorFlowNET.Core/Operations/logging_ops.cs @@ -30,7 +30,7 @@ public Tensor print_v2(Tensor input, string output_stream = "stderr", string end name: name); return tf.Context.ExecuteOp("PrintV2", name, new ExecuteOpArgs(formatted_string) - .SetAttributes(new { output_stream, end })); + .SetAttributes(new { output_stream, end })).SingleOrNull; } } } diff --git a/src/TensorFlowNET.Core/Operations/sort_ops.cs b/src/TensorFlowNET.Core/Operations/sort_ops.cs index 34b903230..db38a073b 100644 --- a/src/TensorFlowNET.Core/Operations/sort_ops.cs +++ b/src/TensorFlowNET.Core/Operations/sort_ops.cs @@ -44,7 +44,7 @@ public static Tensor argsort(Tensor values, Axis axis = null, string direction = { sorted = true })); - return indices; + return indices.Single; } public static Tensor sort(Tensor values, Axis axis, string direction = "ASCENDING", string? name = null) diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj index 09f5b0770..b08b2e2b7 100644 --- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj +++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj @@ -114,4 +114,9 @@ https://tensorflownet.readthedocs.io + + + + + diff --git a/src/TensorFlowNET.Core/Tensors/Tensors.cs b/src/TensorFlowNET.Core/Tensors/Tensors.cs index d063ee39f..caa36b761 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensors.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensors.cs @@ -23,6 +23,38 @@ public class Tensors : IEnumerable, IDisposable public Graph graph => items.First().graph; public bool IsList { get; set; } public int Length => items.Count(); + /// + /// Return a Tensor if `Tensors` has only one tensor, otherwise throw an exception. + /// + public Tensor Single + { + get + { + if (Length != 1) + { + throw new ValueError("Tensors with more than one tensor cannot be " + + "implicitly converted to Tensor."); + } + return items.First(); + } + } + + /// + /// Return a Tensor if `Tensors` has only one tensor, and return null when `Tensors` is empty, + /// otherwise throw an exception. + /// + public Tensor? 
SingleOrNull + { + get + { + if (Length > 1) + { + throw new ValueError($"Tensors with {Length} tensor cannot be " + + "implicitly converted to Tensor."); + } + return items.FirstOrDefault(); + } + } public Tensor this[int index] { @@ -183,18 +215,18 @@ public static implicit operator Tensors(Tensor[] tensors) public static implicit operator Tensors(List tensors) => new Tensors(tensors.ToArray()); - public static implicit operator Tensor(Tensors tensors) - => tensors.FirstOrDefault(); + public static implicit operator Tensor(Tensors? tensors) + => tensors?.SingleOrNull; public static implicit operator Tensor[](Tensors tensors) => tensors.items.ToArray(); #endregion - public void Deconstruct(out Tensor a, out Tensor b) + public void Deconstruct(out Tensor a, out Tensors? b) { a = items[0]; - b = items[1]; + b = Length == 1? null : new Tensors(items.Skip(1)); } private static void EnsureSingleTensor(Tensors tensors, string methodnName) diff --git a/src/TensorFlowNET.Core/Util/nest.py.cs b/src/TensorFlowNET.Core/Util/nest.py.cs index eb94f4d05..ab6f56b3e 100644 --- a/src/TensorFlowNET.Core/Util/nest.py.cs +++ b/src/TensorFlowNET.Core/Util/nest.py.cs @@ -170,6 +170,39 @@ private static object _sequence_like(object instance, IEnumerable args) throw new TypeError("Type of sequence not supported (yet): " + instance.GetType()); } + public static bool is_nested(object obj) + { + // Refer to https://www.tensorflow.org/api_docs/python/tf/nest + //if (obj is IList || obj is IDictionary || obj is ITuple) + // return true; + if (obj is IList || obj is IDictionary) + return true; + + if (obj is NDArray || obj is Tensor || obj is string || obj.GetType().IsGenericType + || obj is ISet || obj is ISet || obj is ISet) + return false; + + if (obj.GetType().IsNested) return true; + // Check if the object is an IEnumerable + if (obj is IEnumerable) + { + // If it is, check if it is a nested structure + foreach (object item in (IEnumerable)obj) + { + if (is_nested(item)) + { + return true; + } + } + return true; + } + else + { + // If it is not, return false + return false; + } + } + /// /// Yields the next value from the given iterable. /// diff --git a/src/TensorFlowNET.Keras/BackendImpl.cs b/src/TensorFlowNET.Keras/BackendImpl.cs index 80403ad6a..a7c1bcadf 100644 --- a/src/TensorFlowNET.Keras/BackendImpl.cs +++ b/src/TensorFlowNET.Keras/BackendImpl.cs @@ -22,6 +22,7 @@ limitations under the License. using Tensorflow.Graphs; using static Tensorflow.Binding; using static Tensorflow.Graphs.SubGraphUtility; +using Tensorflow.Util; namespace Tensorflow.Keras { @@ -450,5 +451,514 @@ public Tensor conv2d_transpose(Tensor x, return x; } + + public static (Tensors, Tensors, Tensors) rnn( + Func step_function, // args:inputs, states, return:output, new_states + Tensors inputs, // inputs is a tuple of tensors (one per input sequence) + Tensors initial_states, + bool go_backwards = false, + Tensor? mask = null, + Tensors? constants = null, + bool unroll = false, + Tensors? 
input_length = null, // An integer or a 1-D Tensor,depending on whether the time dimension is fixed-length or not + bool time_major = false, + bool zero_output_for_mask = false, + bool return_all_outputs = true) + { + + Tensors swap_batch_timestep(Tensors input_t) + { + var axes = Enumerable.Range(0, input_t.rank).ToArray(); + axes[0] = 1; + axes[1] = 0; + return tf.transpose(input_t, axes); + } + + if (!time_major) + { + inputs = nest.map_structure(swap_batch_timestep, inputs); + } + + var flatted_inptus = nest.flatten(inputs); + var time_steps = flatted_inptus[0].shape[0]; + var batch = flatted_inptus[0].shape[1]; + var time_step_t = tf.shape(flatted_inptus[0])[0]; + + foreach (var input_ in flatted_inptus) + { + input_.shape.with_rank_at_least(3); + } + + if (mask != null) + { + if (mask.dtype != TF_DataType.TF_BOOL) + { + mask = tf.cast(mask, TF_DataType.TF_BOOL); + } + + if (mask.rank == 2) + { + mask = tf.expand_dims(mask, -1); + } + + if (!time_major) + { + mask = swap_batch_timestep(mask); + } + + } + + if (constants == null) + { + constants = new List(); + } + + // tf.where needs its condition tensor to be the same shape as its two + // result tensors, but in our case the condition (mask) tensor is + // (nsamples, 1), and inputs are (nsamples, ndimensions) or even more. + // So we need to broadcast the mask to match the shape of inputs. + // That's what the tile call does, it just repeats the mask along its + // second dimension n times. + + Tensors _expand_mask(Tensors mask_t, Tensors input_t, int fixed_dim = 1) + { + if (nest.is_nested(mask_t)) + { + throw new ValueError($"mask_t is expected to be tensor, but got {mask_t}"); + } + + if (nest.is_nested(input_t)) + { + throw new ValueError($"input_t is expected to be tensor, but got {input_t}"); + } + + var rank_diff = input_t.rank - mask_t.rank; + for (int i = 0; i < rank_diff; i++) + { + mask_t = tf.expand_dims(mask_t, -1); + } + var multiples = Enumerable.Repeat(1, fixed_dim).ToArray().concat(input_t.shape.as_int_list().ToList().GetRange(fixed_dim, input_t.rank)); + return tf.tile(mask_t, multiples); + } + + Tensors outputs = new Tensors(); + Tensors output_time_zero = new Tensors(); + Tensors last_output = new Tensors(); + Tensors new_states = new Tensors(); + if (unroll) + { + if (time_steps == 0) + { + throw new ValueError("Unrolling requires a fixed number of timesteps."); + } + + // Process the input tensors. The input tensor need to be split on the + // time_step dim, and reverse if go_backwards is True. In the case of + // nested input, the input is flattened and then transformed + // individually. The result of this will be a tuple of lists, each of + // the item in tuple is list of the tensor with shape (batch, feature) + + + // TODO(Wanglongzhi2001),step_func接受的第二个参数为List,但是最后却用的tuple + //var states = Tuple.Create(initial_states); + var states = initial_states; + + var successive_states = new Tensors(); + var successive_outputs = new Tensors(); + + // Process the input tensors. The input tensor need to be split on the + // time_step dim, and reverse if go_backwards is True. In the case of + // nested input, the input is flattened and then transformed + // individually. 
The result of this will be a tuple of lists, each of + // the item in tuple is list of the tensor with shape (batch, feature) + + + + + Tensors _process_single_input_t(Tensors input_t) + { + input_t = tf.unstack(input_t); // unstack for time_step dim + if (go_backwards) + { + input_t.Reverse(); + } + return input_t; + } + + // TODO(Wanglongzhi2001) + Tensors processed_input; + if (nest.is_nested(inputs)) + { + processed_input = nest.map_structure(_process_single_input_t, inputs); + } + else + { + processed_input = _process_single_input_t(inputs); + } + + object _get_input_tensor(int time) + { + List inp = new List(); + foreach (var t_ in processed_input) + { + inp.Add(t_[time]); + } + return nest.pack_sequence_as(inputs, inp); + } + + //if (mask != null) + //{ + // var mask_list = tf.unstack(mask); + // if (go_backwards) + // { + // mask_list.Reverse(); + // } + + // for (int i = 0; i < time_steps; i++) + // { + // // TODO(Wanglongzhi2001),deal with _get_input_tensor + // var inp = _get_input_tensor(i); + // var mask_t = mask_list[i]; + // // TODO + // var (output, newStates) = step_function((Tensors)inp, new Tensors { states, constants }); + + // var tiled_mask_t = _expand_mask(mask_t, output); + + // Tensors prev_output; + // if (successive_outputs == null) + // { + // prev_output = tf.zeros_like(output); + // } + // else + // { + // prev_output = successive_outputs[successive_outputs.Length - 1]; + // } + + // output = tf.where(tiled_mask_t, output, prev_output); + + // //var flat_states = nest.flatten(states); + // //var flat_new_states = nest.flatten(newStates); + // var flat_states = states.ToList(); + // var flat_new_states = newStates.ToList(); + + // var tiledMaskT = flat_states + // .Select(s => _expand_mask(mask_t, s)) + // .ToArray(); + // var tuple = Tuple.Create(tiledMaskT); + + // List flat_final_states = new List(); + // foreach (var (m, s, ps) in Enumerable.Zip(tiled_mask_t, flat_new_states, flat_states)) + // { + // flat_final_states.Add(tf.where(m, s, ps)); + // } + + // states = (Tensors)nest.pack_sequence_as(states, flat_final_states); + // if (return_all_outputs) + // { + // successive_outputs.Add(output); + // successive_states.Add(states); + // } + // else + // { + // successive_outputs = new Tensors { output }; + // successive_states = new Tensors { states }; + // } + + // } + // last_output = successive_outputs[successive_outputs.Length - 1]; + // new_states = successive_states[successive_states.Length - 1]; + // outputs = tf.stack(successive_outputs); + + // if (zero_output_for_mask) + // { + // last_output = tf.where(_expand_mask(mask_list[mask_list.Length - 1], last_output), last_output, tf.zeros_like(last_output)); + // outputs = tf.where(_expand_mask(mask, outputs, fixed_dim: 2), outputs, tf.zeros_like(outputs)); + // } + // else // mask is null + // { + // for (int i = 0; i < time_steps; i++) + // { + // var inp = _get_input_tensor(i); + // var (output, newStates) = step_function((Tensors)inp, new Tensors { states, constants }); + // states = newStates; + + // if (return_all_outputs) + // { + // successive_outputs.Add(output); + // successive_states.Add(newStates); + // } + // else + // { + // successive_outputs = new Tensors { output }; + // successive_states = new Tensors { newStates }; + // } + // } + // last_output = successive_outputs[successive_outputs.Length - 1]; + // new_states = successive_states[successive_states.Length - 1]; + // outputs = tf.stack(successive_outputs); + // } + //} + } + //else // unroll == false + //{ + // var states = 
initial_states; + // // Create input tensor array, if the inputs is nested tensors, then it + // // will be flattened first, and tensor array will be created one per + // // flattened tensor. + // var input_ta = new List(); + // for (int i = 0; i < flatted_inptus.Count; i++) + // { + // input_ta.Add(tf.TensorArray(dtype: flatted_inptus[i].dtype, size: time_step_t)); + // } + + // // Get the time(0) input and compute the output for that, the output will + // // be used to determine the dtype of output tensor array. Don't read from + // // input_ta due to TensorArray clear_after_read default to True. + // var inps = new Tensors(); + // foreach (var inp in flatted_inptus) + // { + // inps.Add(inp[0]); + // } + // var input_time_zero = nest.pack_sequence_as(inputs, inps); + + // // output_time_zero is used to determine the cell output shape and its + // // dtype. the value is discarded. + // (output_time_zero, _) = step_function((Tensor)input_time_zero, new Tensors { initial_states, constants }); + + // var output_ta_size = return_all_outputs ? time_step_t : tf.constant(1); + // var output_ta = new List(); + // for (int i = 0; i < output_time_zero.ToList().Count; i++) + // { + // var Out = output_time_zero.ToList()[i]; + // output_ta.Add(tf.TensorArray(dtype: Out.dtype, size: output_ta_size, element_shape: Out.shape)); + // } + + // var time = tf.constant(0, dtype: TF_DataType.TF_INT32, name: "time"); + + + + // Func? masking_fn; + // Func? compute_masked_output = null; + // if (mask != null) + // { + // if (go_backwards) + // { + // mask = tf.reverse(mask, axis: new[] { 0 }); + // } + // var mask_ta = tf.TensorArray(dtype: TF_DataType.TF_BOOL, size: time_step_t); + // mask_ta = mask_ta.unstack(mask); + + // masking_fn = (time) => + // { + // return mask_ta.read(time); + // }; + + // compute_masked_output = (mask_t, flat_out, flat_mask) => + // { + // var tiled_mask_t = new Tensors(); + // foreach (var o in flat_out) + // { + // tiled_mask_t.Add(_expand_mask(mask_t, o, fixed_dim: mask_t.rank)); + // } + + // Tensors res = new Tensors(); + // foreach (var (m, o, fm) in Enumerable.Zip(tiled_mask_t, flat_out, flat_mask)) + // { + // res.Add(tf.where(m, o, fm)); + // } + // return res; + // }; + // } + // // TODO(Wanglongzhi2001), what the input_length's type should be(an integer or a single tensor)? + // else if (input_length is Tensor) + // { + // if (go_backwards) + // { + // var max_len = tf.reduce_max(input_length, axis: 0); + // var rev_input_length = tf.subtract(max_len - 1, input_length); + + // masking_fn = (time) => + // { + // return tf.less(rev_input_length, time); + // }; + // } + // else + // { + // masking_fn = (time) => + // { + // return tf.greater(input_length, time); + // }; + // } + + // compute_masked_output = (mask_t, flat_out, flat_mask) => + // { + // var res = new List(); + // foreach (var (o, zo) in zip(flat_out, flat_mask)) + // { + // res.Add(tf.where(mask_t, o, zo)); + // } + // return res; + // }; + // } + // else + // { + // masking_fn = null; + // } + + + // if (masking_fn != null) + // { + // // Mask for the T output will be base on the output of T - 1. In the + // // case T = 0, a zero filled tensor will be used. + // var flat_zero_output = new Tensors(); + // foreach (var o in nest.flatten(output_time_zero)) + // { + // flat_zero_output.Add(tf.zeros_like(o)); + // } + + + // (Tensor, List, Tensors, Tensors) _step(Tensor time, List output_ta_t, Tensors prev_output, Tensors states) + // { + // /* + // RNN step function. + // Args: + // time: Current timestep value. 
+ // output_ta_t: TensorArray. + // prev_output: tuple of outputs from time - 1. + // *states: List of states. + // Returns: + // Tuple(todo): `(time + 1, output_ta_t, output) + tuple(new_states)` + // */ + + // var current_input = input_ta.Select(x => x.read(time)).ToList(); + // // maybe set shape + // // TODO(Wanglongzhi2001),deal with nest.pack_sequence_as's return type + // current_input = (List)nest.pack_sequence_as(inputs, current_input); + // var mask_t = masking_fn(time); + // var (output, new_states) = step_function(current_input, new Tensors { states, constants }); + // // mask output + // //var flat_output = nest.flatten(output); + // var flat_output = output.ToList(); + + // var flat_mask_output = zero_output_for_mask ? flat_zero_output : prev_output.ToList(); + + // // TODO(Wanglongzhi2001),deal with compute_masked_output's third parameter's type + // var flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output); + + // // mask states + // var flat_state = states.ToList(); + // var flat_new_state = new_states.ToList(); + + // foreach (var (state, new_state) in zip(flat_state, flat_new_state)) + // { + // if (new_state is Tensor) + // { + // new_state.set_shape(state.shape); + // } + // } + + // var flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state); + // new_states = (Tensors)nest.pack_sequence_as(new_states, flat_final_state); + + // var ta_index_to_write = return_all_outputs ? time : tf.constant(0); + // var Output_ta_t = new List(); + // // TODO(Wanglongzhi2001),deal with zip output_ta_t + // foreach (var (ta, Out) in zip(output_ta_t, flat_new_output)) + // { + // Output_ta_t.Add(ta.write(ta_index_to_write, Out)); + // } + + + + // //new_states = (Tensors)nest.pack_sequence_as(initial_states, flat_new_state); + + + // return (time + 1, Output_ta_t, flat_new_output, new_states); + + // } + // Func cond = (time) => (time < time_step_t); + + // var final_outputs = tf.while_loop(cond: cond, body: _step, loop_vars: (time, output_ta, flat_zero_output, states)); + // new_states = final_outputs.Item4; + // output_ta = final_outputs.Item2; + + // } + // else + // { + // (Tensor, List, Tensors) _step(Tensor time, List output_ta_t, Tensors states) + // { + // var current_input = input_ta.Select(x => x.read(time)).ToList(); + // // maybe set shape + // // TODO(Wanglongzhi2001),deal with nest.pack_sequence_as's return type + // current_input = (List)nest.pack_sequence_as(inputs, current_input); + // var (output, new_states) = step_function(current_input, new Tensors { states, constants }); + // var flat_state = states.ToList(); + // var flat_new_state = new_states.ToList(); + // foreach (var (state, new_state) in zip(flat_state, flat_new_state)) + // { + // if (new_state is Tensor) + // { + // new_state.set_shape(state.shape); + // } + // } + // var flat_output = output.ToList(); + // var ta_index_to_write = return_all_outputs ? 
time : tf.constant(0); + // var Output_ta_t = new List(); + // foreach (var (ta, out_) in zip(output_ta_t, flat_output)) + // { + // Output_ta_t.Add(ta.write(ta_index_to_write, out_)); + // } + + // new_states = (Tensors)nest.pack_sequence_as(initial_states, flat_new_state); + // return (time + 1, Output_ta_t, new_states); + // } + // Func cond = (time) => (time < time_step_t); + // var final_outputs = tf.while_loop(cond: cond, body: _step, loop_vars: (time, output_ta, states)); + // new_states = final_outputs.Item3; + // output_ta = final_outputs.Item2; + + // } + // //Tensors outputs = new Tensors(); + // foreach (var o in output_ta) + // { + // outputs.Add(o.stack()); + // } + // foreach (var o in outputs) + // { + // last_output.Add(o[-1]); + // } + // outputs = (Tensors)nest.pack_sequence_as(output_time_zero, outputs); + // last_output = (Tensors)nest.pack_sequence_as(output_time_zero, last_output); + + //} + + Func set_shape; + set_shape = (output_) => + { + if (output_ is Tensor) + { + var shape = output_.shape.as_int_list(); + if (return_all_outputs) + { + shape[0] = (int)time_steps; + } + else + { + shape[0] = 1; + } + shape[1] = (int)batch; + output_.set_shape(new Tensor(shape)); + } + return output_; + }; + + var Outputs = (Tensors)nest.map_structure(set_shape, outputs); + if (!time_major) + { + Outputs = nest.map_structure(swap_batch_timestep, outputs); + } + return (last_output, Outputs, new_states); + + } } } diff --git a/src/TensorFlowNET.Keras/Engine/Functional.cs b/src/TensorFlowNET.Keras/Engine/Functional.cs index e768bd0bd..7347585f8 100644 --- a/src/TensorFlowNET.Keras/Engine/Functional.cs +++ b/src/TensorFlowNET.Keras/Engine/Functional.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Linq; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Saving.SavedModel; using Tensorflow.Keras.Utils; @@ -81,7 +82,7 @@ protected void _init_graph_network(Tensors inputs, Tensors outputs) } else { - _buildInputShape = new Saving.TensorShapeConfig(); + _buildInputShape = new TensorShapeConfig(); } if (outputs.Any(x => x.KerasHistory == null)) @@ -325,7 +326,7 @@ void BuildMapHelper(Tensor tensor, nodes_in_decreasing_depth.append(node); } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { var tensor_dict = new Dictionary>(); // map input values diff --git a/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs b/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs index c04304580..a0358f074 100644 --- a/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs +++ b/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs @@ -1,4 +1,5 @@ using System.Threading; +using Tensorflow.Common.Types; using static Tensorflow.Binding; namespace Tensorflow.Keras.Engine @@ -8,11 +9,11 @@ public partial class Layer /// /// Wraps `call`, applying pre- and post-processing steps. /// - /// + /// /// /// /// - public Tensors Apply(Tensors inputs, Tensor state = null, bool training = false) + public virtual Tensors Apply(Tensors inputs, Tensors states = null, bool training = false, IOptionalArgs? 
optional_args = null) { if (callContext.Value == null) callContext.Value = new CallContext(); @@ -30,7 +31,7 @@ public Tensors Apply(Tensors inputs, Tensor state = null, bool training = false) if (!built) MaybeBuild(inputs); - var outputs = Call(inputs, state: state, training: training); + var outputs = Call(inputs, state: states, training: training); // memory leak // _set_connectivity_metadata_(inputs, outputs); diff --git a/src/TensorFlowNET.Keras/Engine/Layer.cs b/src/TensorFlowNET.Keras/Engine/Layer.cs index 5942efd92..2f758a850 100644 --- a/src/TensorFlowNET.Keras/Engine/Layer.cs +++ b/src/TensorFlowNET.Keras/Engine/Layer.cs @@ -32,7 +32,7 @@ limitations under the License. using static Tensorflow.Binding; using Tensorflow.Framework; using Tensorflow.Sessions; - +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Engine { @@ -332,7 +332,7 @@ private Tensor compute_mask(Tensor inputs, Tensor mask = null) /// /// /// - protected virtual Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected virtual Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { if(ReplacedCall is not null) { diff --git a/src/TensorFlowNET.Keras/Engine/Model.cs b/src/TensorFlowNET.Keras/Engine/Model.cs index 83702b23a..7b35d5477 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.cs @@ -1,8 +1,8 @@ using System.Diagnostics; +using Tensorflow.Common.Types; using Tensorflow.Framework.Models; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Losses; -using Tensorflow.Keras.Saving; using Tensorflow.Keras.Saving.SavedModel; using Tensorflow.Keras.Utils; using Tensorflow.Train; diff --git a/src/TensorFlowNET.Keras/Engine/Sequential.cs b/src/TensorFlowNET.Keras/Engine/Sequential.cs index 278747515..6a468ad27 100644 --- a/src/TensorFlowNET.Keras/Engine/Sequential.cs +++ b/src/TensorFlowNET.Keras/Engine/Sequential.cs @@ -21,6 +21,7 @@ limitations under the License. using Tensorflow.Keras.Layers; using Tensorflow.Keras.Utils; using static Tensorflow.KerasApi; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Engine { @@ -143,7 +144,7 @@ public void add(ILayer layer) } } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { if (!_has_explicit_input_shape) { diff --git a/src/TensorFlowNET.Keras/Layers/Activation/ELU.cs b/src/TensorFlowNET.Keras/Layers/Activation/ELU.cs index 739c0d56f..23f36c862 100644 --- a/src/TensorFlowNET.Keras/Layers/Activation/ELU.cs +++ b/src/TensorFlowNET.Keras/Layers/Activation/ELU.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Text; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; @@ -29,7 +30,7 @@ public override void build(KerasShapesWrapper input_shape) base.build(input_shape); } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? 
optional_args = null) { Tensor output = inputs; output = tf.where(output > 0f, output, diff --git a/src/TensorFlowNET.Keras/Layers/Activation/Exponential.cs b/src/TensorFlowNET.Keras/Layers/Activation/Exponential.cs index 17636302f..81fefb314 100644 --- a/src/TensorFlowNET.Keras/Layers/Activation/Exponential.cs +++ b/src/TensorFlowNET.Keras/Layers/Activation/Exponential.cs @@ -4,7 +4,7 @@ using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; -using static Tensorflow.Binding; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { public class Exponential : Layer @@ -17,7 +17,7 @@ public override void build(KerasShapesWrapper input_shape) { base.build(input_shape); } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { Tensor output = inputs; return tf.exp(output); diff --git a/src/TensorFlowNET.Keras/Layers/Activation/HardSigmoid.cs b/src/TensorFlowNET.Keras/Layers/Activation/HardSigmoid.cs index b498d1b94..e0f91380b 100644 --- a/src/TensorFlowNET.Keras/Layers/Activation/HardSigmoid.cs +++ b/src/TensorFlowNET.Keras/Layers/Activation/HardSigmoid.cs @@ -3,6 +3,7 @@ using System.Text; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; +using Tensorflow.Common.Types; using static Tensorflow.Binding; namespace Tensorflow.Keras.Layers { @@ -10,7 +11,7 @@ public class HardSigmoid : Layer { public HardSigmoid ( LayerArgs args ) : base(args) { // hard sigmoid has no arguments } - protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) { + protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null ) { Tensor x = inputs; return tf.clip_by_value( tf.add(tf.multiply(x, 0.2f), 0.5f), 0f, 1f); diff --git a/src/TensorFlowNET.Keras/Layers/Activation/LeakyReLu.cs b/src/TensorFlowNET.Keras/Layers/Activation/LeakyReLu.cs index 1fbbf4eaf..cfbd0186d 100644 --- a/src/TensorFlowNET.Keras/Layers/Activation/LeakyReLu.cs +++ b/src/TensorFlowNET.Keras/Layers/Activation/LeakyReLu.cs @@ -3,6 +3,7 @@ using System.Text; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; +using Tensorflow.Common.Types; using static Tensorflow.Binding; namespace Tensorflow.Keras.Layers @@ -19,7 +20,7 @@ public LeakyReLu(LeakyReLuArgs args) : base(args) this.args = args; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { return tf.nn.leaky_relu(inputs, alpha: alpha); } diff --git a/src/TensorFlowNET.Keras/Layers/Activation/SELU.cs b/src/TensorFlowNET.Keras/Layers/Activation/SELU.cs index 53101fbb4..2e943d5f7 100644 --- a/src/TensorFlowNET.Keras/Layers/Activation/SELU.cs +++ b/src/TensorFlowNET.Keras/Layers/Activation/SELU.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Text; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; @@ -22,7 +23,7 @@ public override void build(KerasShapesWrapper input_shape) { } base.build(input_shape); } - protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? 
training = null ) { + protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { Tensor output = inputs; return tf.where(output > 0f, tf.multiply(scale, output), diff --git a/src/TensorFlowNET.Keras/Layers/Activation/Softmax.cs b/src/TensorFlowNET.Keras/Layers/Activation/Softmax.cs index 3ffae27f6..d018128d5 100644 --- a/src/TensorFlowNET.Keras/Layers/Activation/Softmax.cs +++ b/src/TensorFlowNET.Keras/Layers/Activation/Softmax.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Text; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using static Tensorflow.Binding; @@ -11,8 +12,8 @@ public class Softmax : Layer { public Softmax ( SoftmaxArgs args ) : base(args) { axis = args.axis; } - protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) { - Tensor x = inputs.Length == 2 ? inputs + ((1.0 - tf.cast(inputs[1], inputs.dtype)) * 1e-9) + protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { + Tensor x = inputs.Length == 2 ? inputs[0] + ((1.0 - tf.cast(inputs[1], inputs.dtype)) * 1e-9) : inputs; Tensor e = tf.exp(tf.sub(x, tf.reduce_max(x, axis: this.axis, keepdims: true))); Tensor s = tf.reduce_sum(e, axis: this.axis, keepdims: true); diff --git a/src/TensorFlowNET.Keras/Layers/Activation/Softplus.cs b/src/TensorFlowNET.Keras/Layers/Activation/Softplus.cs index e82b01982..1e6c59b42 100644 --- a/src/TensorFlowNET.Keras/Layers/Activation/Softplus.cs +++ b/src/TensorFlowNET.Keras/Layers/Activation/Softplus.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Text; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using static Tensorflow.Binding; @@ -10,7 +11,7 @@ public class Softplus : Layer { public Softplus ( LayerArgs args ) : base(args) { // Softplus has no arguments } - protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) { + protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { Tensor x = inputs; return tf.log( tf.add(tf.exp(x), 1f)); diff --git a/src/TensorFlowNET.Keras/Layers/Activation/Softsign.cs b/src/TensorFlowNET.Keras/Layers/Activation/Softsign.cs index 59329fd44..5ad33e99d 100644 --- a/src/TensorFlowNET.Keras/Layers/Activation/Softsign.cs +++ b/src/TensorFlowNET.Keras/Layers/Activation/Softsign.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Text; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using static Tensorflow.Binding; @@ -10,7 +11,7 @@ public class Softsign : Layer { public Softsign ( LayerArgs args ) : base(args) { // Softsign has no arguments } - protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) { + protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? 
optional_args = null) { Tensor x = inputs; // x / (abs(x) + 1) return tf.div(x, tf.add(1f, tf.abs(x))); diff --git a/src/TensorFlowNET.Keras/Layers/Activation/Swish.cs b/src/TensorFlowNET.Keras/Layers/Activation/Swish.cs index 1dcb92b31..ed0d105a6 100644 --- a/src/TensorFlowNET.Keras/Layers/Activation/Swish.cs +++ b/src/TensorFlowNET.Keras/Layers/Activation/Swish.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Text; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using static Tensorflow.Binding; @@ -10,7 +11,7 @@ public class Swish : Layer { public Swish ( LayerArgs args ) : base(args) { // Swish has no arguments } - protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) { + protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { Tensor x = inputs; // x / (1 + exp(-x)) diff --git a/src/TensorFlowNET.Keras/Layers/Activation/Tanh.cs b/src/TensorFlowNET.Keras/Layers/Activation/Tanh.cs index 99b803942..7e90cf9d8 100644 --- a/src/TensorFlowNET.Keras/Layers/Activation/Tanh.cs +++ b/src/TensorFlowNET.Keras/Layers/Activation/Tanh.cs @@ -3,6 +3,7 @@ using System.Text; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; +using Tensorflow.Common.Types; using static Tensorflow.Binding; namespace Tensorflow.Keras.Layers @@ -13,7 +14,7 @@ public Tanh(LayerArgs args) : base(args) { // Tanh has no arguments } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { Tensor x = inputs; diff --git a/src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs b/src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs index 1348e19cf..19b292727 100644 --- a/src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs +++ b/src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs @@ -6,6 +6,7 @@ using System.Collections.Generic; using System.Linq; using Tensorflow.Keras.Saving; +using Tensorflow.Common.Types; /// /// Base class for attention layers that can be used in sequence DNN/CNN models. @@ -114,7 +115,7 @@ public virtual Tensor _calculate_scores(Tensor query, Tensor key) => return (tf.linalg.einsum("bij,bjk->bik", (weights, value)), weights); } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { Tensors _inp; Tensors _mask = null; diff --git a/src/TensorFlowNET.Keras/Layers/Attention/MultiHeadAttention.cs b/src/TensorFlowNET.Keras/Layers/Attention/MultiHeadAttention.cs index 701724d5b..75dd4a41a 100644 --- a/src/TensorFlowNET.Keras/Layers/Attention/MultiHeadAttention.cs +++ b/src/TensorFlowNET.Keras/Layers/Attention/MultiHeadAttention.cs @@ -6,6 +6,7 @@ using static Tensorflow.KerasApi; using System; using System.Linq; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -252,7 +253,7 @@ public Tensors _compute_attention( return (attention_output, attention_scores); } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? 
optional_args = null) { Tensors _inp; Tensor _mask = null; @@ -349,7 +350,7 @@ protected Tensors call(Tensors inputs, //} if (return_attention_scores) - return (attention_output, attention_scores); + return (attention_output, attention_scores.Single); return attention_output; } } diff --git a/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs b/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs index bbd49acd2..94ad79141 100644 --- a/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs +++ b/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs @@ -20,6 +20,7 @@ limitations under the License. using Tensorflow.Keras.Utils; using static Tensorflow.KerasApi; using Tensorflow.Keras.Saving; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -83,7 +84,7 @@ public override void build(KerasShapesWrapper input_shape) _buildInputShape = input_shape; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { var inputs_shape = array_ops.shape(inputs); var batch_size = inputs_shape[0]; diff --git a/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs b/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs index c575362c0..d8e00d520 100644 --- a/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs +++ b/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs @@ -17,6 +17,7 @@ limitations under the License. using System; using System.Collections.Generic; using System.Linq; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; @@ -103,7 +104,7 @@ public override void build(KerasShapesWrapper input_shape) _buildInputShape = input_shape; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = false) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = false, IOptionalArgs? optional_args = null) { var outputs = _convolution_op.Apply(inputs, kernel.AsTensor()); if (use_bias) diff --git a/src/TensorFlowNET.Keras/Layers/Core/Dense.cs b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs index aa6617ddc..db5d626ed 100644 --- a/src/TensorFlowNET.Keras/Layers/Core/Dense.cs +++ b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs @@ -18,6 +18,7 @@ limitations under the License. using System.Collections.Generic; using System.Diagnostics; using System.Linq; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; @@ -69,7 +70,7 @@ public override void build(KerasShapesWrapper input_shape) built = true; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? 
optional_args = null) { Tensor outputs = null; var rank = inputs.rank; diff --git a/src/TensorFlowNET.Keras/Layers/Core/EinsumDense.cs b/src/TensorFlowNET.Keras/Layers/Core/EinsumDense.cs index fb604f77e..0cbd50846 100644 --- a/src/TensorFlowNET.Keras/Layers/Core/EinsumDense.cs +++ b/src/TensorFlowNET.Keras/Layers/Core/EinsumDense.cs @@ -7,6 +7,7 @@ using Tensorflow.Keras.Engine; using Tensorflow.Keras.ArgsDefinition.Core; using Tensorflow.Keras.Saving; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -189,7 +190,7 @@ public override Shape ComputeOutputShape(Shape input_shape) // return new dict(base_config.items().ToList() + config.items().ToList()); //} - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { var ret = tf.linalg.einsum(this.equation, (inputs, this.kernel.AsTensor())); if (this.bias != null) diff --git a/src/TensorFlowNET.Keras/Layers/Core/Embedding.cs b/src/TensorFlowNET.Keras/Layers/Core/Embedding.cs index 9487a7d00..87b42bb7b 100644 --- a/src/TensorFlowNET.Keras/Layers/Core/Embedding.cs +++ b/src/TensorFlowNET.Keras/Layers/Core/Embedding.cs @@ -15,6 +15,7 @@ limitations under the License. ******************************************************************************/ using System.Linq; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; @@ -66,7 +67,7 @@ public override void build(KerasShapesWrapper input_shape) _buildInputShape = input_shape; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { var dtype = inputs.dtype; if (dtype != tf.int32 && dtype != tf.int64) diff --git a/src/TensorFlowNET.Keras/Layers/Merging/Merge.cs b/src/TensorFlowNET.Keras/Layers/Merging/Merge.cs index 7df654eeb..bcbb20d88 100644 --- a/src/TensorFlowNET.Keras/Layers/Merging/Merge.cs +++ b/src/TensorFlowNET.Keras/Layers/Merging/Merge.cs @@ -5,6 +5,7 @@ using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -21,7 +22,7 @@ public override void build(KerasShapesWrapper input_shape) _buildInputShape = input_shape; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { return _merge_function(inputs); } diff --git a/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs b/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs index d02d2509c..655581576 100644 --- a/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs +++ b/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs @@ -17,6 +17,7 @@ limitations under the License. using System; using System.Collections.Generic; using System.Linq; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; @@ -146,7 +147,7 @@ bool _support_zero_size_input() return false; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? 
training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { Tensor outputs = null; var training_tensor = training == null diff --git a/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs b/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs index e90c04029..1898f24c8 100644 --- a/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs +++ b/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs @@ -17,6 +17,7 @@ limitations under the License. using System; using System.Collections.Generic; using System.Linq; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; @@ -101,7 +102,7 @@ public override Shape ComputeOutputShape(Shape input_shape) return input_shape; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { Tensor outputs = null; var inputs_dtype = inputs.dtype.as_base_dtype(); diff --git a/src/TensorFlowNET.Keras/Layers/Normalization/Normalization.cs b/src/TensorFlowNET.Keras/Layers/Normalization/Normalization.cs index a65154bf4..987b56bc4 100644 --- a/src/TensorFlowNET.Keras/Layers/Normalization/Normalization.cs +++ b/src/TensorFlowNET.Keras/Layers/Normalization/Normalization.cs @@ -14,6 +14,7 @@ You may obtain a copy of the License at limitations under the License. ******************************************************************************/ +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Saving; @@ -157,7 +158,7 @@ public override void adapt(Tensor data, int? batch_size = null, int? steps = nul base.adapt(data, batch_size: batch_size, steps: steps); } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { if (_args.Invert) { diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs index d62fb63a4..ffaabec97 100644 --- a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Text; using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -12,7 +13,7 @@ public GlobalAveragePooling1D(Pooling1DArgs args) { } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? 
optional_args = null) { if (data_format == "channels_last") return math_ops.reduce_mean(inputs, 1, false); diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs index 000e4b8b9..e06665173 100644 --- a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Text; using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -12,7 +13,7 @@ public GlobalAveragePooling2D(Pooling2DArgs args) { } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { if (data_format == "channels_last") return math_ops.reduce_mean(inputs, (1, 2), false); diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs index 2de4671ca..15695e8a7 100644 --- a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Text; using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -12,7 +13,7 @@ public GlobalMaxPooling1D(Pooling1DArgs args) { } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { if (data_format == "channels_last") return math_ops.reduce_max(inputs, 1, false); diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs index b7e2c9452..76db858da 100644 --- a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Text; using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -12,7 +13,7 @@ public GlobalMaxPooling2D(Pooling2DArgs args) { } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { if (data_format == "channels_last") return math_ops.reduce_max(inputs, (1, 2), false); diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs index a2f4c51b6..81a340199 100644 --- a/src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs +++ b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs @@ -18,6 +18,7 @@ limitations under the License. using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Utils; +using Tensorflow.Common.Types; using static Tensorflow.Binding; namespace Tensorflow.Keras.Layers @@ -36,7 +37,7 @@ public Pooling1D(Pooling1DArgs args) input_spec = new InputSpec(ndim: 3); } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? 
optional_args = null) { int pad_axis = args.DataFormat == "channels_first" ? 2 : 3; inputs = tf.expand_dims(inputs, pad_axis); diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs index 270322559..f83f1e152 100644 --- a/src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs +++ b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs @@ -17,6 +17,7 @@ limitations under the License. using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Utils; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -36,7 +37,7 @@ public Pooling2D(Pooling2DArgs args) input_spec = new InputSpec(ndim: 4); } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { int[] pool_shape; int[] strides; diff --git a/src/TensorFlowNET.Keras/Layers/Preprocessing/CategoryEncoding.cs b/src/TensorFlowNET.Keras/Layers/Preprocessing/CategoryEncoding.cs index 5620a916c..20d2a53d5 100644 --- a/src/TensorFlowNET.Keras/Layers/Preprocessing/CategoryEncoding.cs +++ b/src/TensorFlowNET.Keras/Layers/Preprocessing/CategoryEncoding.cs @@ -1,6 +1,6 @@ using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; - +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { /// @@ -15,7 +15,7 @@ public CategoryEncoding(CategoryEncodingArgs args) : base(args) this.args = args; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { var depth = args.NumTokens; var max_value = tf.reduce_max(inputs); diff --git a/src/TensorFlowNET.Keras/Layers/Preprocessing/Rescaling.cs b/src/TensorFlowNET.Keras/Layers/Preprocessing/Rescaling.cs index 5fc581af9..7fa367eea 100644 --- a/src/TensorFlowNET.Keras/Layers/Preprocessing/Rescaling.cs +++ b/src/TensorFlowNET.Keras/Layers/Preprocessing/Rescaling.cs @@ -1,5 +1,6 @@ using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -17,7 +18,7 @@ public Rescaling(RescalingArgs args) : base(args) this.args = args; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { scale = constant_op.constant(args.Scale, args.DType); offset = constant_op.constant(args.Offset, args.DType); diff --git a/src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs b/src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs index 603e2b071..081966ad4 100644 --- a/src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs +++ b/src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs @@ -4,6 +4,7 @@ using System.Text; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Saving; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -19,7 +20,7 @@ public Resizing(ResizingArgs args) : base(args) this.args = args; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? 
optional_args = null) { return image_ops_impl.resize_images_v2(inputs, new[] { args.Height, args.Width }, method: args.Interpolation); } diff --git a/src/TensorFlowNET.Keras/Layers/Regularization/Dropout.cs b/src/TensorFlowNET.Keras/Layers/Regularization/Dropout.cs index aa3a92a49..ada1851ce 100644 --- a/src/TensorFlowNET.Keras/Layers/Regularization/Dropout.cs +++ b/src/TensorFlowNET.Keras/Layers/Regularization/Dropout.cs @@ -1,4 +1,5 @@ -using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Common.Types; +using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Utils; using static Tensorflow.Binding; @@ -15,7 +16,7 @@ public Dropout(DropoutArgs args) this.args = args; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { if (training == null) training = false; diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs index 9ead15cb5..312854388 100644 --- a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs @@ -1,6 +1,8 @@ using Tensorflow.Keras.ArgsDefinition.Reshaping; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; +using Tensorflow.Common.Types; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers.Reshaping { @@ -27,7 +29,7 @@ public override void build(KerasShapesWrapper input_shape) _buildInputShape = input_shape; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { Tensor output = inputs; if (output.rank != 3) diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping2D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping2D.cs index 087d59a14..4a5c6eabc 100644 --- a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping2D.cs +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping2D.cs @@ -1,6 +1,7 @@ using Tensorflow.Keras.ArgsDefinition.Reshaping; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers.Reshaping { @@ -21,7 +22,7 @@ public override void build(KerasShapesWrapper input_shape) built = true; _buildInputShape = input_shape; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { Tensor output = inputs; if (output.rank != 4) diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping3D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping3D.cs index 04a1af600..83f86c6fc 100644 --- a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping3D.cs +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping3D.cs @@ -1,6 +1,7 @@ using Tensorflow.Keras.ArgsDefinition.Reshaping; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers.Reshaping { @@ -21,7 +22,7 @@ public override void build(KerasShapesWrapper input_shape) _buildInputShape = input_shape; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? 
training = null, IOptionalArgs? optional_args = null) { Tensor output = inputs; if (output.rank != 5) diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs index 539b5f624..a6192849d 100644 --- a/src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs @@ -1,5 +1,6 @@ using System; using System.Linq; +using Tensorflow.Common.Types; using Tensorflow.Framework; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; @@ -23,7 +24,7 @@ public Flatten(FlattenArgs args) _channels_first = args.DataFormat == "channels_first"; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { if (_channels_first) { diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Permute.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Permute.cs index e391775c8..7fdb816bf 100644 --- a/src/TensorFlowNET.Keras/Layers/Reshaping/Permute.cs +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Permute.cs @@ -6,6 +6,7 @@ using static Tensorflow.Binding; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Saving; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { public class Permute : Layer @@ -28,7 +29,7 @@ public override void build(KerasShapesWrapper input_shape) built = true; _buildInputShape = input_shape; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { Tensor outputs = inputs; return tf.transpose(outputs, new Axis(permute)); diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs index 92a772f34..4b3d30e29 100644 --- a/src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs @@ -4,6 +4,7 @@ using System.Collections.Generic; using System; using System.Linq; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -19,7 +20,7 @@ public Reshape(ReshapeArgs args) this.args = args; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { var shapes = new List(); shapes.Add(array_ops.shape(inputs)[0]); diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs index 8314151f6..223f33d4f 100644 --- a/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs @@ -6,6 +6,7 @@ using Tensorflow.Keras.Utils; using static Tensorflow.Binding; using static Tensorflow.KerasApi; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -24,7 +25,7 @@ public UpSampling2D(UpSampling2DArgs args) : base(args) inputSpec = new InputSpec(ndim: 4); } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? 
optional_args = null) { return keras.backend.resize_images(inputs, size[0], size[1], diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/ZeroPadding2D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/ZeroPadding2D.cs index 7c87100a2..3b37dac46 100644 --- a/src/TensorFlowNET.Keras/Layers/Reshaping/ZeroPadding2D.cs +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/ZeroPadding2D.cs @@ -2,6 +2,7 @@ using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Utils; +using Tensorflow.Common.Types; using static Tensorflow.KerasApi; namespace Tensorflow.Keras.Layers @@ -26,7 +27,7 @@ public ZeroPadding2D(ZeroPadding2DArgs args, string data_format = null) this.input_spec = new InputSpec(ndim: 4); } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { return keras.backend.spatial_2d_padding(inputs, padding: padding, diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs new file mode 100644 index 000000000..21396853f --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs @@ -0,0 +1,85 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Common.Types; +using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Keras.Engine; + +namespace Tensorflow.Keras.Layers.Rnn +{ + public abstract class DropoutRNNCellMixin: RnnCellBase + { + public float dropout; + public float recurrent_dropout; + // TODO(Rinne): deal with cache. + public DropoutRNNCellMixin(LayerArgs args): base(args) + { + + } + + public Tensors? get_dropout_maskcell_for_cell(Tensors input, bool training, int count = 1) + { + if (dropout == 0f) + return null; + return _generate_dropout_mask( + tf.ones_like(input), + dropout, + training, + count); + } + + // Get the recurrent dropout mask for RNN cell. + public Tensors? 
get_recurrent_dropout_maskcell_for_cell(Tensors input, bool training, int count = 1) + { + if (dropout == 0f) + return null; + return _generate_dropout_mask( + tf.ones_like(input), + recurrent_dropout, + training, + count); + } + + public Tensors _create_dropout_mask(Tensors input, bool training, int count = 1) + { + return _generate_dropout_mask( + tf.ones_like(input), + dropout, + training, + count); + } + + public Tensors _create_recurrent_dropout_mask(Tensors input, bool training, int count = 1) + { + return _generate_dropout_mask( + tf.ones_like(input), + recurrent_dropout, + training, + count); + } + + public Tensors _generate_dropout_mask(Tensor ones, float rate, bool training, int count = 1) + { + Tensors dropped_inputs() + { + DropoutArgs args = new DropoutArgs(); + args.Rate = rate; + var DropoutLayer = new Dropout(args); + var mask = DropoutLayer.Apply(ones, training: training); + return mask; + } + + if (count > 1) + { + Tensors results = new Tensors(); + for (int i = 0; i < count; i++) + { + results.Add(dropped_inputs()); + } + return results; + } + + return dropped_inputs(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs b/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs index 59555e62b..1449c908e 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs @@ -1,6 +1,7 @@ using System.Linq; using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers.Rnn { @@ -26,9 +27,9 @@ public LSTM(LSTMArgs args) : .ToArray(); } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { - return base.Call(inputs, state: state, training: training); + return base.Call(inputs, initial_state: state, training: training); } } } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs index 310e80574..b014737f6 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs @@ -1,53 +1,466 @@ -using System; +using OneOf; +using System; using System.Collections.Generic; +using System.Reflection; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; +using Tensorflow.Util; +using Tensorflow.Common.Extensions; +using System.Linq.Expressions; +using Tensorflow.Keras.Utils; +using Tensorflow.Common.Types; // from tensorflow.python.distribute import distribution_strategy_context as ds_context; namespace Tensorflow.Keras.Layers.Rnn { - public class RNN : Layer + /// + /// Base class for recurrent layers. + /// See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) + /// for details about the usage of RNN API. + /// + public class RNN : RnnBase { - private RNNArgs args; - private object input_spec = null; // or NoneValue?? - private object state_spec = null; - private object _states = null; - private object constants_spec = null; - private int _num_constants = 0; - protected IVariableV1 kernel; - protected IVariableV1 bias; - protected ILayer cell; + private RNNArgs _args; + private object _input_spec = null; // or NoneValue?? 
+ private object _state_spec = null; + private Tensors _states = null; + private object _constants_spec = null; + private int _num_constants; + protected IVariableV1 _kernel; + protected IVariableV1 _bias; + protected IRnnCell _cell; + public RNN(RNNArgs args) : base(PreConstruct(args)) { - this.args = args; + _args = args; SupportsMasking = true; - // The input shape is unknown yet, it could have nested tensor inputs, and - // the input spec will be the list of specs for nested inputs, the structure - // of the input_spec will be the same as the input. + // if is StackedRnncell + _cell = args.Cell; - //if(stateful) - //{ - // if (ds_context.has_strategy()) // ds_context???? - // { - // throw new Exception("RNNs with stateful=True not yet supported with tf.distribute.Strategy"); - // } - //} + // get input_shape + _args = PreConstruct(args); + + _num_constants = 0; + } + + // States is a tuple consist of cell states_size, like (cell1.state_size, cell2.state_size,...) + // state_size can be a single integer, can also be a list/tuple of integers, can also be TensorShape or a list/tuple of TensorShape + public Tensors States + { + get + { + if (_states == null) + { + // CHECK(Rinne): check if this is correct. + var state = nest.map_structure(x => null, _cell.StateSize); + return new Tensors { state }; + } + return _states; + } + set { _states = value; } + } + + private OneOf> compute_output_shape(Shape input_shape) + { + var batch = input_shape[0]; + var time_step = input_shape[1]; + if (_args.TimeMajor) + { + (batch, time_step) = (time_step, batch); + } + + // state_size is a array of ints or a positive integer + var state_size = _cell.StateSize.ToSingleShape(); + + // TODO(wanglongzhi2001),flat_output_size应该是什么类型的,Shape还是Tensor + Func _get_output_shape; + _get_output_shape = (flat_output_size) => + { + var output_dim = flat_output_size.as_int_list(); + Shape output_shape; + if (_args.ReturnSequences) + { + if (_args.TimeMajor) + { + output_shape = new Shape(new int[] { (int)time_step, (int)batch }.concat(output_dim)); + } + else + { + output_shape = new Shape(new int[] { (int)batch, (int)time_step }.concat(output_dim)); + + } + } + else + { + output_shape = new Shape(new int[] { (int)batch }.concat(output_dim)); + } + return output_shape; + }; + + Type type = _cell.GetType(); + PropertyInfo output_size_info = type.GetProperty("output_size"); + Shape output_shape; + if (output_size_info != null) + { + output_shape = nest.map_structure(_get_output_shape, _cell.OutputSize.ToSingleShape()); + // TODO(wanglongzhi2001),output_shape应该简单的就是一个元组还是一个Shape类型 + output_shape = (output_shape.Length == 1 ? (int)output_shape[0] : output_shape); + } + else + { + output_shape = _get_output_shape(state_size); + } + + if (_args.ReturnState) + { + Func _get_state_shape; + _get_state_shape = (flat_state) => + { + var state_shape = new int[] { (int)batch }.concat(flat_state.as_int_list()); + return new Shape(state_shape); + }; + var state_shape = _get_state_shape(state_size); + + return new List { output_shape, state_shape }; + } + else + { + return output_shape; + } + + } + + private Tensors compute_mask(Tensors inputs, Tensors mask) + { + // Time step masks must be the same for each input. + // This is because the mask for an RNN is of size [batch, time_steps, 1], + // and specifies which time steps should be skipped, and a time step + // must be skipped for all inputs. + + mask = nest.flatten(mask)[0]; + var output_mask = _args.ReturnSequences ? 
mask : null; + if (_args.ReturnState) + { + var state_mask = new List(); + for (int i = 0; i < len(States); i++) + { + state_mask.Add(null); + } + return new List { output_mask }.concat(state_mask); + } + else + { + return output_mask; + } } public override void build(KerasShapesWrapper input_shape) { - if (!cell.Built) + object get_input_spec(Shape shape) + { + var input_spec_shape = shape.as_int_list(); + + var (batch_index, time_step_index) = _args.TimeMajor ? (1, 0) : (0, 1); + if (!_args.Stateful) + { + input_spec_shape[batch_index] = -1; + } + input_spec_shape[time_step_index] = -1; + return new InputSpec(shape: input_spec_shape); + } + + Shape get_step_input_shape(Shape shape) + { + + // return shape[1:] if self.time_major else (shape[0],) + shape[2:] + if (_args.TimeMajor) + { + return shape.as_int_list().ToList().GetRange(1, shape.Length - 1).ToArray(); + } + else + { + return new int[] { shape.as_int_list()[0] }.concat(shape.as_int_list().ToList().GetRange(2, shape.Length - 2).ToArray()); + } + + + } + + object get_state_spec(Shape shape) + { + var state_spec_shape = shape.as_int_list(); + // append bacth dim + state_spec_shape = new int[] { -1 }.concat(state_spec_shape); + return new InputSpec(shape: state_spec_shape); + + } + + // Check whether the input shape contains any nested shapes. It could be + // (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from + // numpy inputs. + + + if (!_cell.Built) { - cell.build(input_shape); + _cell.build(input_shape); } } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + /// + /// + /// + /// + /// Binary tensor of shape [batch_size, timesteps] indicating whether a given timestep should be masked + /// + /// List of initial state tensors to be passed to the first call of the cell + /// List of constant tensors to be passed to the cell at each timestep + /// + /// + /// + protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bool? training = null, IOptionalArgs? optional_args = null) { - return base.Call(inputs, state, training); + RnnOptionalArgs? rnn_optional_args = optional_args as RnnOptionalArgs; + if(optional_args is not null && rnn_optional_args is null) + { + throw new ArgumentException("The optional args shhould be of type `RnnOptionalArgs`"); + } + Tensors? constants = rnn_optional_args?.Constants; + Tensors? mask = rnn_optional_args?.Mask; + //var (inputs_padded, row_length) = BackendImpl.convert_inputs_if_ragged(inputs); + // 暂时先不接受ragged tensor + int? row_length = null; + bool is_ragged_input = false; + _validate_args_if_ragged(is_ragged_input, mask); + + (inputs, initial_state, constants) = _process_inputs(inputs, initial_state, constants); + + _maybe_reset_cell_dropout_mask(_cell); + if (_cell is StackedRNNCells) + { + var stack_cell = _cell as StackedRNNCells; + foreach (var cell in stack_cell.Cells) + { + _maybe_reset_cell_dropout_mask(cell); + } + } + + if (mask != null) + { + // Time step masks must be the same for each input. + mask = nest.flatten(mask)[0]; + } + + Shape input_shape; + if (nest.is_nested(inputs)) + { + // In the case of nested input, use the first element for shape check + // input_shape = nest.flatten(inputs)[0].shape; + // TODO(Wanglongzhi2001) + input_shape = nest.flatten(inputs)[0].shape; + } + else + { + input_shape = inputs.shape; + } + + var timesteps = _args.TimeMajor ? 
input_shape[0] : input_shape[1]; + + if (_args.Unroll && timesteps != null) + { + throw new ValueError( + "Cannot unroll a RNN if the " + + "time dimension is undefined. \n" + + "- If using a Sequential model, " + + "specify the time dimension by passing " + + "an `input_shape` or `batch_input_shape` " + + "argument to your first layer. If your " + + "first layer is an Embedding, you can " + + "also use the `input_length` argument.\n" + + "- If using the functional API, specify " + + "the time dimension by passing a `shape` " + + "or `batch_shape` argument to your Input layer." + ); + } + + // cell_call_fn = (self.cell.__call__ if callable(self.cell) else self.cell.call) + Func step; + if (constants is not null) + { + if (!_cell.SupportOptionalArgs) + { + throw new ValueError( + $"RNN cell {_cell} does not support constants." + + $"Received: constants={constants}"); + } + + step = (inputs, states) => + { + constants = new Tensors(states.TakeLast(_num_constants)); + states = new Tensors(states.SkipLast(_num_constants)); + var(output, new_states) = _cell.Apply(inputs, states, optional_args: new RnnOptionalArgs() { Constants = constants }); + // TODO(Wanglongzhi2001),should cell_call_fn's return value be Tensors, Tensors? + return (output, new_states.Single); + }; + } + else + { + step = (inputs, states) => + { + // states = (states[0] if len(states) == 1 and is_tf_rnn_cell else states) + var (output, new_states) = _cell.Apply(inputs, states); + return (output, new_states.Single); + }; + } + + var (last_output, outputs, states) = BackendImpl.rnn(step, + inputs, + initial_state, + constants: constants, + go_backwards: _args.GoBackwards, + mask: mask, + unroll: _args.Unroll, + input_length: row_length != null ? new Tensor(row_length) : new Tensor(timesteps), + time_major: _args.TimeMajor, + zero_output_for_mask: _args.ZeroOutputForMask, + return_all_outputs: _args.ReturnSequences); + + if (_args.Stateful) + { + throw new NotImplementedException("this argument havn't been developed."); + } + + Tensors output = new Tensors(); + if (_args.ReturnSequences) + { + throw new NotImplementedException("this argument havn't been developed."); + + } + else + { + output = last_output; + } + + if (_args.ReturnState) + { + foreach (var state in states) + { + output.Add(state); + } + return output; + } + else + { + return output; + } + } + + public override Tensors Apply(Tensors inputs, Tensors initial_states = null, bool training = false, IOptionalArgs? optional_args = null) + { + RnnOptionalArgs? rnn_optional_args = optional_args as RnnOptionalArgs; + if (optional_args is not null && rnn_optional_args is null) + { + throw new ArgumentException("The type of optional args should be `RnnOptionalArgs`."); + } + Tensors? constants = rnn_optional_args?.Constants; + (inputs, initial_states, constants) = RnnUtils.standardize_args(inputs, initial_states, constants, _num_constants); + + if(initial_states is null && constants is null) + { + return base.Apply(inputs); + } + + // TODO(Rinne): implement it. 
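+            // Note: when callers do pass initial_states or constants, Keras folds them into `inputs`
+            // before dispatching to the base layer's call; that path is not wired up here yet,
+            // hence the exception below.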
+ throw new NotImplementedException(); + } + + private (Tensors inputs, Tensors initial_state, Tensors constants) _process_inputs(Tensors inputs, Tensors initial_state, Tensors constants) + { + if (inputs.Length > 1) + { + if (_num_constants != 0) + { + initial_state = new Tensors(inputs.Skip(1)); + } + else + { + initial_state = new Tensors(inputs.Skip(1).SkipLast(_num_constants)); + constants = new Tensors(inputs.TakeLast(_num_constants)); + } + if (len(initial_state) == 0) + initial_state = null; + inputs = inputs[0]; + } + + if (_args.Stateful) + { + if (initial_state != null) + { + var tmp = new Tensor[] { }; + foreach (var s in nest.flatten(States)) + { + tmp.add(tf.math.count_nonzero((Tensor)s)); + } + var non_zero_count = tf.add_n(tmp); + //initial_state = tf.cond(non_zero_count > 0, () => States, () => initial_state); + if ((int)non_zero_count.numpy() > 0) + { + initial_state = States; + } + } + else + { + initial_state = States; + } + + } + else if (initial_state is null) + { + initial_state = get_initial_state(inputs); + } + + if (initial_state.Length != States.Length) + { + throw new ValueError( + $"Layer {this} expects {States.Length} state(s), " + + $"but it received {initial_state.Length} " + + $"initial state(s). Input received: {inputs}"); + } + + return (inputs, initial_state, constants); + } + + private void _validate_args_if_ragged(bool is_ragged_input, Tensors mask) + { + if (!is_ragged_input) + { + return; + } + + if (_args.Unroll) + { + throw new ValueError("The input received contains RaggedTensors and does " + + "not support unrolling. Disable unrolling by passing " + + "`unroll=False` in the RNN Layer constructor."); + } + if (mask != null) + { + throw new ValueError($"The mask that was passed in was {mask}, which " + + "cannot be applied to RaggedTensor inputs. 
Please " + + "make sure that there is no mask injected by upstream " + + "layers."); + } + + } + + void _maybe_reset_cell_dropout_mask(ILayer cell) + { + //if (cell is DropoutRNNCellMixin) + //{ + // cell.reset_dropout_mask(); + // cell.reset_recurrent_dropout_mask(); + //} } private static RNNArgs PreConstruct(RNNArgs args) @@ -77,60 +490,72 @@ private static RNNArgs PreConstruct(RNNArgs args) return args; } - public RNN New(LayerRnnCell cell, - bool return_sequences = false, - bool return_state = false, - bool go_backwards = false, - bool stateful = false, - bool unroll = false, - bool time_major = false) - => new RNN(new RNNArgs - { - Cell = cell, - ReturnSequences = return_sequences, - ReturnState = return_state, - GoBackwards = go_backwards, - Stateful = stateful, - Unroll = unroll, - TimeMajor = time_major - }); - - public RNN New(IList cell, - bool return_sequences = false, - bool return_state = false, - bool go_backwards = false, - bool stateful = false, - bool unroll = false, - bool time_major = false) - => new RNN(new RNNArgs - { - Cell = new StackedRNNCells(new StackedRNNCellsArgs { Cells = cell }), - ReturnSequences = return_sequences, - ReturnState = return_state, - GoBackwards = go_backwards, - Stateful = stateful, - Unroll = unroll, - TimeMajor = time_major - }); - - - protected Tensor get_initial_state(Tensor inputs) + public Tensors __call__(Tensors inputs, Tensor state = null, Tensor training = null) { - return _generate_zero_filled_state_for_cell(null, null); + throw new NotImplementedException(); } - Tensor _generate_zero_filled_state_for_cell(LSTMCell cell, Tensor batch_size) + // 好像不能cell不能传接口类型 + //public RNN New(IRnnArgCell cell, + // bool return_sequences = false, + // bool return_state = false, + // bool go_backwards = false, + // bool stateful = false, + // bool unroll = false, + // bool time_major = false) + // => new RNN(new RNNArgs + // { + // Cell = cell, + // ReturnSequences = return_sequences, + // ReturnState = return_state, + // GoBackwards = go_backwards, + // Stateful = stateful, + // Unroll = unroll, + // TimeMajor = time_major + // }); + + //public RNN New(List cell, + // bool return_sequences = false, + // bool return_state = false, + // bool go_backwards = false, + // bool stateful = false, + // bool unroll = false, + // bool time_major = false) + // => new RNN(new RNNArgs + // { + // Cell = cell, + // ReturnSequences = return_sequences, + // ReturnState = return_state, + // GoBackwards = go_backwards, + // Stateful = stateful, + // Unroll = unroll, + // TimeMajor = time_major + // }); + + + protected Tensors get_initial_state(Tensors inputs) { - throw new NotImplementedException(""); + var input = inputs[0]; + var input_shape = input.shape; + var batch_size = _args.TimeMajor ? input_shape[1] : input_shape[0]; + var dtype = input.dtype; + Tensors init_state; + if (_cell is RnnCellBase rnn_base_cell) + { + init_state = rnn_base_cell.GetInitialState(null, batch_size, dtype); + } + else + { + init_state = RnnUtils.generate_zero_filled_state(batch_size, _cell.StateSize, dtype); + } + + return init_state; } // Check whether the state_size contains multiple states. 
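+        // e.g. an LSTM cell carries two state tensors (h, c) while a SimpleRNN cell carries one,
+        // so a cell's state_size may be either a single shape or a collection of shapes.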
- public static bool _is_multiple_state(object state_size) + public static bool is_multiple_state(GeneralizedTensorShape state_size) { - var myIndexerProperty = state_size.GetType().GetProperty("Item"); - return myIndexerProperty != null - && myIndexerProperty.GetIndexParameters().Length == 1 - && !(state_size.GetType() == typeof(Shape)); + return state_size.Shapes.Length > 1; } } } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs new file mode 100644 index 000000000..018b17780 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs @@ -0,0 +1,13 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Keras.Engine; + +namespace Tensorflow.Keras.Layers.Rnn +{ + public abstract class RnnBase: Layer + { + public RnnBase(LayerArgs args): base(args) { } + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RnnCellBase.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RnnCellBase.cs new file mode 100644 index 000000000..fcb5d1ebf --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RnnCellBase.cs @@ -0,0 +1,24 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Common.Types; +using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Keras.ArgsDefinition.Rnn; +using Tensorflow.Keras.Engine; +using Tensorflow.Keras.Utils; + +namespace Tensorflow.Keras.Layers.Rnn +{ + public abstract class RnnCellBase: Layer, IRnnCell + { + public RnnCellBase(LayerArgs args) : base(args) { } + public abstract GeneralizedTensorShape StateSize { get; } + public abstract GeneralizedTensorShape OutputSize { get; } + public abstract bool SupportOptionalArgs { get; } + public abstract (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? 
training = null); + public virtual Tensors GetInitialState(Tensors inputs, long batch_size, TF_DataType dtype) + { + return RnnUtils.generate_zero_filled_state_for_cell(this, inputs, batch_size, dtype); + } + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs index 2d7aab70e..22d0e2770 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs @@ -10,18 +10,36 @@ namespace Tensorflow.Keras.Layers.Rnn public class SimpleRNN : RNN { SimpleRNNArgs args; - public SimpleRNN(SimpleRNNArgs args) : base(args) + public SimpleRNN(SimpleRNNArgs args) : base(CreateCellForArgs(args)) { this.args = args; } + private static SimpleRNNArgs CreateCellForArgs(SimpleRNNArgs args) + { + args.Cell = new SimpleRNNCell(new SimpleRNNCellArgs() + { + Units = args.Units, + Activation = args.Activation, + UseBias = args.UseBias, + KernelInitializer = args.KernelInitializer, + RecurrentInitializer = args.RecurrentInitializer, + BiasInitializer = args.BiasInitializer, + Dropout = args.Dropout, + RecurrentDropout = args.RecurrentDropout, + DType = args.DType, + Trainable = args.Trainable, + }); + return args; + } + public override void build(KerasShapesWrapper input_shape) { var single_shape = input_shape.ToSingleShape(); var input_dim = single_shape[-1]; _buildInputShape = input_shape; - kernel = add_weight("kernel", (single_shape[-1], args.Units), + _kernel = add_weight("kernel", (single_shape[-1], args.Units), initializer: args.KernelInitializer //regularizer = self.kernel_regularizer, //constraint = self.kernel_constraint, diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs index 46061b211..abb57d8ad 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs @@ -4,47 +4,128 @@ using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers.Rnn { - public class SimpleRNNCell : Layer + /// + /// Cell class for SimpleRNN. + /// See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) + /// for details about the usage of RNN API. + /// This class processes one step within the whole time sequence input, whereas + /// `tf.keras.layer.SimpleRNN` processes the whole sequence. 
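+    /// A single step maps the current input and previous state to an output and the next state;
+    /// for this cell the next state is simply the step output.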
+ /// + public class SimpleRNNCell : DropoutRNNCellMixin { - SimpleRNNArgs args; - IVariableV1 kernel; - IVariableV1 recurrent_kernel; - IVariableV1 bias; + SimpleRNNCellArgs _args; + IVariableV1 _kernel; + IVariableV1 _recurrent_kernel; + IVariableV1 _bias; + GeneralizedTensorShape _state_size; + GeneralizedTensorShape _output_size; - public SimpleRNNCell(SimpleRNNArgs args) : base(args) + public override GeneralizedTensorShape StateSize => _state_size; + public override GeneralizedTensorShape OutputSize => _output_size; + public override bool SupportOptionalArgs => false; + + public SimpleRNNCell(SimpleRNNCellArgs args) : base(args) { - this.args = args; + this._args = args; + if (args.Units <= 0) + { + throw new ValueError( + $"units must be a positive integer, got {args.Units}"); + } + this._args.Dropout = Math.Min(1f, Math.Max(0f, this._args.Dropout)); + this._args.RecurrentDropout = Math.Min(1f, Math.Max(0f, this._args.RecurrentDropout)); + _state_size = new GeneralizedTensorShape(args.Units); + _output_size = new GeneralizedTensorShape(args.Units); } public override void build(KerasShapesWrapper input_shape) { + // TODO(Rinne): add the cache. var single_shape = input_shape.ToSingleShape(); var input_dim = single_shape[-1]; - kernel = add_weight("kernel", (single_shape[-1], args.Units), - initializer: args.KernelInitializer + _kernel = add_weight("kernel", (single_shape[-1], _args.Units), + initializer: _args.KernelInitializer ); - recurrent_kernel = add_weight("recurrent_kernel", (args.Units, args.Units), - initializer: args.RecurrentInitializer + _recurrent_kernel = add_weight("recurrent_kernel", (_args.Units, _args.Units), + initializer: _args.RecurrentInitializer ); - if (args.UseBias) + if (_args.UseBias) { - bias = add_weight("bias", (args.Units), - initializer: args.BiasInitializer + _bias = add_weight("bias", (_args.Units), + initializer: _args.BiasInitializer ); } built = true; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + public override (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? training = null) { - return base.Call(inputs, state, training); + // TODO(Rinne): check if it will have multiple tensors when not nested. 
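+            // The code below computes the standard SimpleRNN step:
+            //   h      = matmul(inputs * dropout_mask, kernel) + bias        (bias only when use_bias is set)
+            //   output = activation(h + matmul(prev_output * recurrent_dropout_mask, recurrent_kernel))
+            // `tensordot` is used instead of `matmul` whenever the input rank exceeds 2, and the
+            // returned state is the output itself.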
+ Tensor prev_output = states[0]; + var dp_mask = get_dropout_maskcell_for_cell(inputs, training.Value); + var rec_dp_mask = get_recurrent_dropout_maskcell_for_cell(prev_output, training.Value); + + Tensor h; + var ranks = inputs.rank; + if (dp_mask != null) + { + if (ranks > 2) + { + // 因为multiply函数会自动添加第一个维度,所以加上下标0 + h = tf.linalg.tensordot(math_ops.multiply(inputs, dp_mask)[0], _kernel.AsTensor(), new[,] { { ranks - 1 }, { 0 } }); + } + else + { + h = math_ops.matmul(math_ops.multiply(inputs, dp_mask)[0], _kernel.AsTensor()); + } + } + else + { + if (ranks > 2) + { + h = tf.linalg.tensordot(inputs, _kernel.AsTensor(), new[,] { { ranks - 1 }, { 0 } }); + } + else + { + h = math_ops.matmul(inputs, _kernel.AsTensor()); + } + } + + if (_bias != null) + { + h = tf.nn.bias_add(h, _bias); + } + + if (rec_dp_mask != null) + { + prev_output = math_ops.multiply(prev_output, rec_dp_mask)[0]; + } + + ranks = prev_output.rank; + Tensor output; + if (ranks > 2) + { + output = h + tf.linalg.tensordot(prev_output[0], _recurrent_kernel.AsTensor(), new[,] { { ranks - 1 }, { 0 } }); + } + else + { + output = h + math_ops.matmul(prev_output, _recurrent_kernel.AsTensor()); + } + Console.WriteLine($"shape of output: {output.shape}"); + + if (_args.Activation != null) + { + output = _args.Activation.Apply(output); + } + return (output, new Tensors { output }); } } } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs index 20962df1f..7923192fa 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.ComponentModel; +using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; @@ -8,7 +9,7 @@ namespace Tensorflow.Keras.Layers.Rnn { - public class StackedRNNCells : Layer, RNNArgs.IRnnArgCell + public class StackedRNNCells : Layer, IRnnCell { public IList Cells { get; set; } public bool reverse_state_order; @@ -51,7 +52,7 @@ public object output_size { return lastCell.output_size; } - else if (RNN._is_multiple_state(lastCell.state_size)) + else if (RNN.is_multiple_state(lastCell.StateSize)) { // return ((dynamic)Cells[-1].state_size)[0]; throw new NotImplementedException(""); @@ -162,5 +163,13 @@ public void from_config() // deserialize_layer(cell_config, custom_objects = custom_objects)) // return cls(cells, **config) } + + public (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? training = null) + { + throw new NotImplementedException(); + } + public GeneralizedTensorShape StateSize => throw new NotImplementedException(); + public GeneralizedTensorShape OutputSize => throw new NotImplementedException(); + public bool SupportOptionalArgs => throw new NotImplementedException(); } } diff --git a/src/TensorFlowNET.Keras/Layers/TensorFlowOpLayer.cs b/src/TensorFlowNET.Keras/Layers/TensorFlowOpLayer.cs index 1ac4a277c..6dfec3196 100644 --- a/src/TensorFlowNET.Keras/Layers/TensorFlowOpLayer.cs +++ b/src/TensorFlowNET.Keras/Layers/TensorFlowOpLayer.cs @@ -10,6 +10,7 @@ using static Tensorflow.Binding; using Tensorflow.Functions; using System.Threading; +using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers { @@ -34,7 +35,7 @@ public TensorFlowOpLayer(TensorFlowOpLayerArgs args) built = true; } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? 
training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { if (tf.Context.executing_eagerly()) return DeFunCall(inputs); diff --git a/src/TensorFlowNET.Keras/Metrics/metrics_utils.cs b/src/TensorFlowNET.Keras/Metrics/metrics_utils.cs index be6a49ec5..3c2f8a7be 100644 --- a/src/TensorFlowNET.Keras/Metrics/metrics_utils.cs +++ b/src/TensorFlowNET.Keras/Metrics/metrics_utils.cs @@ -304,7 +304,7 @@ private static Tensor _filter_top_k(Tensor x, int k) var NEG_INF = -1e10; var (_, top_k_idx) = tf.math.top_k(x, k, sorted: false); var top_k_mask = tf.reduce_sum( - tf.one_hot(top_k_idx, (int)x.shape[-1], axis: -1), axis: -2); + tf.one_hot(top_k_idx.Single, (int)x.shape[-1], axis: -1), axis: -2); return x * top_k_mask + NEG_INF * (1 - top_k_mask); } } diff --git a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs index fa19987b1..4acae4265 100644 --- a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs +++ b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs @@ -129,7 +129,7 @@ public IDatasetV2 timeseries_dataset_from_array(Tensor data, int sequence_length var indices = z.map(m => { var (i, positions) = m; - return tf.range(positions[i], positions[i] + sequence_length_tensor * sampling_rate_tensor, sampling_rate_tensor); + return tf.range(positions.Single[i], positions.Single[i] + sequence_length_tensor * sampling_rate_tensor, sampling_rate_tensor); }, num_parallel_calls: -1); var dataset = sequences_from_indices(data, indices, start_index, end_index); diff --git a/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs b/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs index a26879e0c..396ad20eb 100644 --- a/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs +++ b/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs @@ -8,7 +8,7 @@ using System.Linq; using System.Reflection; using System.Text.RegularExpressions; -using Tensorflow.Extensions; +using Tensorflow.Common.Extensions; using Tensorflow.Framework.Models; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; diff --git a/src/TensorFlowNET.Keras/Utils/RnnUtils.cs b/src/TensorFlowNET.Keras/Utils/RnnUtils.cs new file mode 100644 index 000000000..3109eb77b --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/RnnUtils.cs @@ -0,0 +1,93 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using Tensorflow.Common.Types; +using Tensorflow.Keras.Layers.Rnn; +using Tensorflow.Common.Extensions; + +namespace Tensorflow.Keras.Utils +{ + internal static class RnnUtils + { + internal static Tensors generate_zero_filled_state(long batch_size_tensor, GeneralizedTensorShape state_size, TF_DataType dtype) + { + Func create_zeros; + create_zeros = (GeneralizedTensorShape unnested_state_size) => + { + var flat_dims = unnested_state_size.ToSingleShape().dims; + var init_state_size = new long[] { batch_size_tensor }.Concat(flat_dims).ToArray(); + return array_ops.zeros(new Shape(init_state_size), dtype: dtype); + }; + + // TODO(Rinne): map structure with nested tensors. 
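+            // Example: batch_size = 32 with a single state shape (4,) yields one zeros tensor of
+            // shape (32, 4); a stacked cell with state shapes ((4,), (8,)) yields two zeros tensors
+            // of shapes (32, 4) and (32, 8).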
+ if(state_size.Shapes.Length > 1) + { + return new Tensors(state_size.ToShapeArray().Select(s => create_zeros(new GeneralizedTensorShape(s)))); + } + else + { + return create_zeros(state_size); + } + + } + + internal static Tensors generate_zero_filled_state_for_cell(IRnnCell cell, Tensors inputs, long batch_size, TF_DataType dtype) + { + if (inputs != null) + { + batch_size = inputs.shape[0]; + dtype = inputs.dtype; + } + return generate_zero_filled_state(batch_size, cell.StateSize, dtype); + } + + /// + /// Standardizes `__call__` to a single list of tensor inputs. + /// + /// When running a model loaded from a file, the input tensors + /// `initial_state` and `constants` can be passed to `RNN.__call__()` as part + /// of `inputs` instead of by the dedicated keyword arguments.This method + /// makes sure the arguments are separated and that `initial_state` and + /// `constants` are lists of tensors(or None). + /// + /// Tensor or list/tuple of tensors. which may include constants + /// and initial states.In that case `num_constant` must be specified. + /// Tensor or list of tensors or None, initial states. + /// Tensor or list of tensors or None, constant tensors. + /// Expected number of constants (if constants are passed as + /// part of the `inputs` list. + /// + internal static (Tensors, Tensors, Tensors) standardize_args(Tensors inputs, Tensors initial_state, Tensors constants, int num_constants) + { + if(inputs.Length > 1) + { + // There are several situations here: + // In the graph mode, __call__ will be only called once. The initial_state + // and constants could be in inputs (from file loading). + // In the eager mode, __call__ will be called twice, once during + // rnn_layer(inputs=input_t, constants=c_t, ...), and second time will be + // model.fit/train_on_batch/predict with real np data. In the second case, + // the inputs will contain initial_state and constants as eager tensor. + // + // For either case, the real input is the first item in the list, which + // could be a nested structure itself. Then followed by initial_states, which + // could be a list of items, or list of list if the initial_state is complex + // structure, and finally followed by constants which is a flat list. 
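+                // Example with num_constants = 2: inputs = [x, s1, s2, c1, c2] is split into
+                // constants = [c1, c2], initial_state = [s1, s2] and inputs = [x].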
+ Debug.Assert(initial_state is null && constants is null); + if(num_constants > 0) + { + constants = inputs.TakeLast(num_constants).ToTensors(); + inputs = inputs.SkipLast(num_constants).ToTensors(); + } + if(inputs.Length > 1) + { + initial_state = inputs.Skip(1).ToTensors(); + inputs = inputs.Take(1).ToTensors(); + } + } + + return (inputs, initial_state, constants); + } + } +} diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs index 3de337469..f4980b82d 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs @@ -144,17 +144,6 @@ public void EinsumDense() Assert.AreEqual(expected_output, actual_output); } - [TestMethod, Ignore("WIP")] - public void SimpleRNN() - { - var inputs = np.arange(6 * 10 * 8).reshape((6, 10, 8)).astype(np.float32); - /*var simple_rnn = keras.layers.SimpleRNN(4); - var output = simple_rnn.Apply(inputs); - Assert.AreEqual((32, 4), output.shape);*/ - var simple_rnn = tf.keras.layers.SimpleRNN(4, return_sequences: true, return_state: true); - var (whole_sequence_output, final_state) = simple_rnn.Apply(inputs); - } - [TestMethod] public void Resizing() { diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs new file mode 100644 index 000000000..55663d41c --- /dev/null +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs @@ -0,0 +1,28 @@ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using Tensorflow.NumPy; +using static Tensorflow.Binding; + +namespace Tensorflow.Keras.UnitTest.Layers +{ + [TestClass] + public class Rnn + { + [TestMethod] + public void SimpleRNN() + { + var inputs = np.arange(6 * 10 * 8).reshape((6, 10, 8)).astype(np.float32); + /*var simple_rnn = keras.layers.SimpleRNN(4); + var output = simple_rnn.Apply(inputs); + Assert.AreEqual((32, 4), output.shape);*/ + var simple_rnn = tf.keras.layers.SimpleRNN(4, return_sequences: true, return_state: true); + var (whole_sequence_output, final_state) = simple_rnn.Apply(inputs); + Console.WriteLine(whole_sequence_output); + Console.WriteLine(final_state); + } + } +} diff --git a/tools/TensorFlowNET.Console/SimpleRnnTest.cs b/tools/TensorFlowNET.Console/SimpleRnnTest.cs index 9769eb655..ae6ebb8a8 100644 --- a/tools/TensorFlowNET.Console/SimpleRnnTest.cs +++ b/tools/TensorFlowNET.Console/SimpleRnnTest.cs @@ -20,7 +20,7 @@ public void Run() // whole_sequence_output has shape `[32, 10, 4]`. // final_state has shape `[32, 4]`. - var (whole_sequence_output, final_state) = simple_rnn.Apply(inputs); + var (whole_sequence_output, final_states) = simple_rnn.Apply(inputs); } } } From 4939105b8f2de49d1f943c7edafd1b35690366ff Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Wed, 7 Jun 2023 07:56:19 +0800 Subject: [PATCH 032/182] feat: add Nestable and Nest. 
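A minimal usage sketch of the new nesting API, assembled only from the members added in this
patch (generic type parameters, which the flattened diff drops, and the exact namespaces are
assumed from the file paths below):

    using System;
    using System.Linq;
    using Tensorflow.Common.Types;

    // Build the nested structure [1, [2, 3]].
    var nested = new Nest<int>(new[]
    {
        new Nest<int>(1),
        new Nest<int>(new[] { new Nest<int>(2), new Nest<int>(3) }),
    });

    var flat = nested.Flatten().ToArray();                          // depth-first: 1, 2, 3
    var doubled = nested.MapStructure(x => x * 2);                  // same structure, every leaf doubled
    var repacked = Nest.PackSequenceAs(nested, new[] { 4, 5, 6 });  // re-nested as [4, [5, 6]]
    Console.WriteLine(nested.IsNested());                           // True: the outer list contains a nested list

`Tensors` and `GeneralizedTensorShape` are wired into the same `INestable`/`INestStructure`
interfaces below, so RNN state handling can flatten and re-pack nested states with these helpers.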
--- .../Common/Extensions/LinqExtensions.cs | 7 + .../Common/Extensions/NestExtensions.cs | 33 ++ .../Common/Types/GeneralizedTensorShape.cs | 53 +- src/TensorFlowNET.Core/Common/Types/INest.cs | 27 ++ .../Common/Types/INestable.cs | 11 + .../Common/Types/Nest.Static.cs | 62 +++ src/TensorFlowNET.Core/Common/Types/Nest.cs | 458 ++++++++++++++++++ .../Common/Types/NestDictionary.cs | 99 ++++ .../Common/Types/NestList.cs | 43 ++ .../Common/Types/NestNode.cs | 32 ++ src/TensorFlowNET.Core/Tensors/Tensors.cs | 211 ++++---- src/TensorFlowNET.Core/Util/nest.py.cs | 34 +- 12 files changed, 946 insertions(+), 124 deletions(-) create mode 100644 src/TensorFlowNET.Core/Common/Extensions/NestExtensions.cs create mode 100644 src/TensorFlowNET.Core/Common/Types/INest.cs create mode 100644 src/TensorFlowNET.Core/Common/Types/INestable.cs create mode 100644 src/TensorFlowNET.Core/Common/Types/Nest.Static.cs create mode 100644 src/TensorFlowNET.Core/Common/Types/Nest.cs create mode 100644 src/TensorFlowNET.Core/Common/Types/NestDictionary.cs create mode 100644 src/TensorFlowNET.Core/Common/Types/NestList.cs create mode 100644 src/TensorFlowNET.Core/Common/Types/NestNode.cs diff --git a/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs b/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs index 0402fca03..6cf62e7b8 100644 --- a/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs +++ b/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs @@ -22,5 +22,12 @@ public static Tensors ToTensors(this IEnumerable tensors) { return new Tensors(tensors); } + + public static void Deconstruct(this (T1, T2, T3) values, out T1 first, out T2 second, out T3 third) + { + first = values.Item1; + second = values.Item2; + third = values.Item3; + } } } diff --git a/src/TensorFlowNET.Core/Common/Extensions/NestExtensions.cs b/src/TensorFlowNET.Core/Common/Extensions/NestExtensions.cs new file mode 100644 index 000000000..76bdd6133 --- /dev/null +++ b/src/TensorFlowNET.Core/Common/Extensions/NestExtensions.cs @@ -0,0 +1,33 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Common.Types; + +namespace Tensorflow.Common.Extensions +{ + public static class NestExtensions + { + public static Tensors ToTensors(this INestable tensors) + { + return new Tensors(tensors.AsNest()); + } + + public static Tensors? ToTensors(this Nest tensors) + { + return Tensors.FromNest(tensors); + } + + /// + /// If the nested object is already a nested type, this function could reduce it. + /// For example, `Nest[Nest[T]]` can be reduced to `Nest[T]`. + /// + /// + /// + /// + /// + public static Nest ReduceTo(this INestStructure input) where TIn: INestStructure + { + return Nest.ReduceFrom(input); + } + } +} diff --git a/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs b/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs index edb9a802f..e05d3deb3 100644 --- a/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs +++ b/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs @@ -5,7 +5,7 @@ namespace Tensorflow.Common.Types { - public class GeneralizedTensorShape: IEnumerable + public class GeneralizedTensorShape: IEnumerable, INestStructure, INestable { public TensorShapeConfig[] Shapes { get; set; } /// @@ -63,6 +63,57 @@ public Shape[] ToShapeArray() return Shapes.Select(x => new Shape(x.Items.Select(y => y is null ? 
-1 : y.Value).ToArray())).ToArray(); } + public IEnumerable Flatten() + { + List result = new List(); + foreach(var shapeConfig in Shapes) + { + result.AddRange(shapeConfig.Items); + } + return result; + } + public INestStructure MapStructure(Func func) + { + List> lists = new(); + foreach(var shapeConfig in Shapes) + { + lists.Add(new Nest(shapeConfig.Items.Select(x => new Nest(func(x))))); + } + return new Nest(lists); + } + + public Nest AsNest() + { + Nest DealWithSingleShape(TensorShapeConfig config) + { + if (config.Items.Length == 0) + { + return Nest.Empty; + } + else if (config.Items.Length == 1) + { + return new Nest(config.Items[0]); + } + else + { + return new Nest(config.Items.Select(x => new Nest(x))); + } + } + + if(Shapes.Length == 0) + { + return Nest.Empty; + } + else if(Shapes.Length == 1) + { + return DealWithSingleShape(Shapes[0]); + } + else + { + return new Nest(Shapes.Select(s => DealWithSingleShape(s))); + } + } + public IEnumerator GetEnumerator() { foreach (var shape in Shapes) diff --git a/src/TensorFlowNET.Core/Common/Types/INest.cs b/src/TensorFlowNET.Core/Common/Types/INest.cs new file mode 100644 index 000000000..001141ddc --- /dev/null +++ b/src/TensorFlowNET.Core/Common/Types/INest.cs @@ -0,0 +1,27 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Common.Types +{ + /// + /// This interface indicates that a class may have a nested structure and provide + /// methods to manipulate with the structure. + /// + public interface INestStructure: INestable + { + /// + /// Flatten the Nestable object. Node that if the object contains only one value, + /// it will be flattened to an enumerable with one element. + /// + /// + IEnumerable Flatten(); + /// + /// Construct a new object with the same nested structure. + /// + /// + /// + /// + INestStructure MapStructure(Func func); + } +} diff --git a/src/TensorFlowNET.Core/Common/Types/INestable.cs b/src/TensorFlowNET.Core/Common/Types/INestable.cs new file mode 100644 index 000000000..7ce49f85a --- /dev/null +++ b/src/TensorFlowNET.Core/Common/Types/INestable.cs @@ -0,0 +1,11 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Common.Types +{ + public interface INestable + { + Nest AsNest(); + } +} diff --git a/src/TensorFlowNET.Core/Common/Types/Nest.Static.cs b/src/TensorFlowNET.Core/Common/Types/Nest.Static.cs new file mode 100644 index 000000000..b67d11f42 --- /dev/null +++ b/src/TensorFlowNET.Core/Common/Types/Nest.Static.cs @@ -0,0 +1,62 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Common.Types +{ + public static class Nest + { + /// + /// Pack the flat items to a nested sequence by the template. + /// + /// + /// + /// + /// + public static Nest PackSequenceAs(INestable template, T[] flatItems) + { + return template.AsNest().PackSequence(flatItems); + } + + /// + /// Pack the flat items to a nested sequence by the template. + /// + /// + /// + /// + /// + public static Nest PackSequenceAs(INestable template, List flatItems) + { + return template.AsNest().PackSequence(flatItems.ToArray()); + } + + /// + /// Flatten the nested object. + /// + /// + /// + /// + public static IEnumerable Flatten(INestable nestedObject) + { + return nestedObject.AsNest().Flatten(); + } + + /// + /// Map the structure with specified function. 
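+        /// The nesting structure (lists and dictionaries) is preserved; only the leaf values are transformed.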
+ /// + /// + /// + /// + /// + /// + public static INestStructure MapStructure(Func func, INestable nestedObject) + { + return nestedObject.AsNest().MapStructure(func); + } + + public static bool IsNested(INestable obj) + { + return obj.AsNest().IsNested(); + } + } +} diff --git a/src/TensorFlowNET.Core/Common/Types/Nest.cs b/src/TensorFlowNET.Core/Common/Types/Nest.cs new file mode 100644 index 000000000..84a60402e --- /dev/null +++ b/src/TensorFlowNET.Core/Common/Types/Nest.cs @@ -0,0 +1,458 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Common.Extensions; + +namespace Tensorflow.Common.Types +{ + public enum NestType + { + Empty, + Node, + List, + Dictionary + } + + /// + /// A nested structure which may inclulde value, list and dictionary. + /// Note that dictionary does not ensure the data order. When using it as IEnumerable, + /// its order is depth-first. + /// + /// + public class Nest : INestStructure, IEnumerable + { + private static readonly Nest _empty = new Nest() + { + NestType = NestType.Empty, + }; + public static Nest Empty => _empty; + public NestType NestType { get; protected set; } + public string? Name { get; set; } + public T? Value { get; protected set; } + public List>? ListValue { get; protected set; } + public Dictionary>? DictValue { get; protected set; } + + protected Nest() { } + + public Nest(T value, string? name = null) + { + Value = value; + Name = name; + NestType = NestType.Node; + } + + public Nest(IEnumerable> values, string? name = null) + { + ListValue = values.ToList(); + Name = name; + NestType = NestType.List; + } + + public Nest(Dictionary> value, string? name = null) + { + DictValue = value; + Name = name; + NestType = NestType.Dictionary; + } + + public Nest(Nest other) + { + NestType = other.NestType; + Value = other.Value; + DictValue = other.DictValue; + ListValue = other.ListValue; + Name = other.Name; + } + + public virtual IEnumerable Flatten() + { + return FlattenInternal(this); + } + public virtual INestStructure MapStructure(Func func) + { + return MapStructureInternal(func); + } + + /// + /// Pack the flat items to a nested sequence by the template. + /// + /// + /// + public virtual Nest PackSequence(T[] flatItems) + { + if(flatItems.Length == 0) + { + return Nest.Empty; + } + int index = 0; + return PackSequenceInternal(this, flatItems, ref index); + } + + private static Nest PackSequenceInternal(Nest template, T[] flatItems, ref int index) + { + if(template.NestType == NestType.Node) + { + if(index >= flatItems.Length) + { + throw new InvalidArgumentError("The template and flat items are not matched."); + } + return new Nest(flatItems[index++]); + } + else if(template.NestType == NestType.List) + { + List> nestedObjects = new List>(); + for (int i = 0; i < template.ListValue!.Count; i++) + { + nestedObjects.Add(PackSequenceInternal(template.ListValue![i], flatItems, ref index)); + } + return new Nest(nestedObjects); + } + else if(template.NestType == NestType.Node) + { + Dictionary> dict = new Dictionary>(); + foreach(var (key, value) in template.DictValue!) + { + dict[key] = PackSequenceInternal(value, flatItems, ref index); + } + return new Nest(dict); + } + // Consider Empty as invalid type. + throw new InvalidArgumentError("When using `PackSequenceAs`, the template cannot contain empty node."); + } + + public virtual Nest AsNest() + { + return this; + } + + public virtual Nest MergeWith(Nest? 
other) + { + if(other is null || other == Nest.Empty) + { + return this; + } + if(this == Nest.Empty) + { + return other; + } + if(NestType == NestType.Node && other.NestType == NestType.Node) + { + return new Nest(new Nest[] { this, other }); + } + else if(NestType == NestType.List && other.NestType == NestType.List) + { + return new Nest(this.ListValue!.Concat(other.ListValue!)); + } + else if(NestType == NestType.Dictionary && other.NestType == NestType.Dictionary) + { + return new Nest(this.DictValue!.Concat(other.DictValue!).ToDictionary(x => x.Key, x => x.Value)); + } + else + { + return new Nest(new Nest[] { this, other }); + } + } + + /// + /// To see if the nested object is really nested. Despite being called `Nest`, sometimes it's actually not + /// nested. For example, [1, 2, 3] is not nested, while [1, [2, 3]] is nested. + /// + /// + public bool IsNested() + { + if(NestType is NestType.Empty or NestType.Node) + { + return false; + } + else if(NestType is NestType.List) + { + foreach(var item in ListValue!) + { + if(item.NestType is NestType.List or NestType.Dictionary) + { + return true; + } + } + return false; + } + else + { + foreach (var item in DictValue!.Values) + { + if (item.NestType is NestType.List or NestType.Dictionary) + { + return true; + } + } + return false; + } + } + + [Obsolete("The indexer of Tensors is not encouraged because it leads to unclear meanings.")] + public T this[int index] + { + get + { + bool success = FindInternal(this, index, out var result); + if (success) + { + return result; + } + else + { + throw new IndexOutOfRangeException(); + } + } + set + { + bool success = SetInternal(this, index, value); + if (!success) + { + throw new IndexOutOfRangeException(); + } + } + } + + /// + /// If the existing nested structure if of type `Nest[INestStructure[T]]`, we can reduce it + /// to `Nest[T]`. + /// + /// + /// + /// + public static Nest ReduceFrom(INestStructure input) where TOut: INestStructure + { + var nested = input.AsNest(); + return ReduceInternal(nested); + } + + private static Nest ReduceInternal(Nest node) where TOut : INestStructure + { + if(node.NestType == NestType.Empty) + { + return Nest.Empty; + } + else if(node.NestType == NestType.Node) + { + return node.Value!.AsNest(); + } + else if(node.NestType == NestType.List) + { + return new Nest(node.ListValue!.Select(x => ReduceInternal(x))); + } + else // Dictionary type + { + return new Nest(node.DictValue!.ToDictionary(x => x.Key, x => ReduceInternal(x.Value))); + } + } + + private static bool FindInternal(Nest node, int index, out T? result) + { + if (node.NestType == NestType.Node) + { + if(index == 0) + { + result = node.Value!; + return true; + } + result = default(T); + return false; + } + else if (node.NestType == NestType.List) + { + foreach (var item in node.ListValue!) + { + if(index == 0) + { + return FindInternal(item, index, out result); + } + index--; + } + result = default(T); + return false; + } + else if(node.NestType == NestType.Dictionary) + { + foreach (var item in node.DictValue!.Values) + { + if (index == 0) + { + return FindInternal(item, index, out result); + } + index--; + } + result = default(T); + return false; + } + else + { + result = default(T); + return false; + } + } + + private static bool SetInternal(Nest node, int index, T newValue) + { + if (node.NestType == NestType.Node) + { + if (index == 0) + { + node.Value = newValue; + return true; + } + return false; + } + else if (node.NestType == NestType.List) + { + foreach (var item in node.ListValue!) 
+ { + if (index == 0) + { + return SetInternal(item, index, newValue); + } + index--; + } + return false; + } + else if (node.NestType == NestType.Dictionary) + { + foreach (var item in node.DictValue!.Values) + { + if (index == 0) + { + return SetInternal(item, index, newValue); + } + index--; + } + return false; + } + else + { + return false; + } + } + + private static IEnumerable FlattenInternal(Nest node) + { + if (node.NestType == NestType.Node) + { + yield return node.Value!; + } + else if (node.NestType == NestType.List) + { + foreach (var item in node.ListValue!) + { + foreach(var val in FlattenInternal(item)) + { + yield return val; + } + } + } + else if (node.NestType == NestType.Dictionary) + { + foreach (var item in node.DictValue!.Values) + { + foreach (var val in FlattenInternal(item)) + { + yield return val; + } + } + } + } + + private Nest MapStructureInternal(Func func) + { + if (NestType == NestType.Node) + { + return new Nest(func(Value!)); + } + else if (NestType == NestType.List) + { + List> outs = new List>(); + foreach (var item in ListValue!) + { + outs.Add(item.MapStructureInternal(func)); + } + return new Nest(outs); + } + else if (NestType == NestType.Dictionary) + { + Dictionary> outs = new Dictionary>(); + foreach (var (key, value) in DictValue!) + { + outs.Add(key, value.MapStructureInternal(func)); + } + return new Nest(outs); + } + else + { + return Nest.Empty; + } + } + + public IEnumerator GetEnumerator() + { + return Flatten().GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + public override string ToString() + { + StringBuilder sb = new StringBuilder(); + sb.Append("("); + WriteString(this, sb); + sb.Append(")"); + return sb.ToString(); + } + + private static void WriteString(Nest node, StringBuilder sb) + { + if (!string.IsNullOrEmpty(node.Name)) + { + sb.Append($"{node.Name}: "); + } + if (node.NestType == NestType.Node) + { + sb.Append(node.Value!.ToString()); + } + else if (node.NestType == NestType.List) + { + sb.Append("["); + for(int i = 0; i < node.ListValue!.Count; i++) + { + WriteString(node.ListValue![i], sb); + if(i != node.ListValue!.Count - 1) + { + sb.Append(", "); + } + } + sb.Append("]"); + } + else if (node.NestType == NestType.Dictionary) + { + sb.Append("{"); + int count = node.DictValue!.Count; + int i = 0; + foreach (var (key, value) in node.DictValue!) 
+ { + sb.Append($"{key}: "); + WriteString(value, sb); + if (i != count - 1) + { + sb.Append(", "); + } + i++; + } + sb.Append("}"); + } + else + { + sb.Append(""); + } + } + } +} diff --git a/src/TensorFlowNET.Core/Common/Types/NestDictionary.cs b/src/TensorFlowNET.Core/Common/Types/NestDictionary.cs new file mode 100644 index 000000000..554ca526d --- /dev/null +++ b/src/TensorFlowNET.Core/Common/Types/NestDictionary.cs @@ -0,0 +1,99 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Common.Types +{ + public class NestDictionary : INestStructure, IDictionary where TKey : notnull + { + public IDictionary Value { get; set; } + public NestDictionary(IDictionary dict) + { + Value = dict; + } + public IEnumerable Flatten() + { + return Value.Select(x => x.Value); + } + public INestStructure MapStructure(Func func) + { + return new NestList(Value.Select(x => func(x.Value))); + } + + public Nest AsNest() + { + return new Nest(Value.Values.Select(x => new Nest(x))); + } + + // Required IDictionary members + public int Count => Value.Count; + + public bool IsReadOnly => Value.IsReadOnly; + + public ICollection Keys => Value.Keys; + + public ICollection Values => Value.Values; + + public void Add(TKey key, TValue value) + { + Value.Add(key, value); + } + + public void Add(KeyValuePair item) + { + Value.Add(item); + } + + public void Clear() + { + Value.Clear(); + } + + public bool Contains(KeyValuePair item) + { + return Value.Contains(item); + } + + public bool ContainsKey(TKey key) + { + return Value.ContainsKey(key); + } + + public void CopyTo(KeyValuePair[] array, int arrayIndex) + { + Value.CopyTo(array, arrayIndex); + } + + public IEnumerator> GetEnumerator() + { + return Value.GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + public bool Remove(TKey key) + { + return Value.Remove(key); + } + + public bool Remove(KeyValuePair item) + { + return Value.Remove(item); + } + + public bool TryGetValue(TKey key, out TValue value) + { + return Value.TryGetValue(key, out value); + } + + // Optional IDictionary members + public TValue this[TKey key] + { + get => Value[key]; + set => Value[key] = value; + } + } +} diff --git a/src/TensorFlowNET.Core/Common/Types/NestList.cs b/src/TensorFlowNET.Core/Common/Types/NestList.cs new file mode 100644 index 000000000..082187188 --- /dev/null +++ b/src/TensorFlowNET.Core/Common/Types/NestList.cs @@ -0,0 +1,43 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Common.Types +{ + /// + /// The implementation of a list that support nest structure, in which the depth is 1. 
+ /// + /// + public sealed class NestList : INestStructure, IEnumerable + { + public List Value { get; set; } + public NestList(IEnumerable values) + { + Value = new List(values); + } + public IEnumerable Flatten() + { + return Value; + } + public INestStructure MapStructure(Func func) + { + return new NestList(Value.Select(x => func(x))); + } + + public Nest AsNest() + { + return new Nest(Value.Select(x => new Nest(x))); + } + + // Enumerator implementation + public IEnumerator GetEnumerator() + { + return Value.GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + } +} diff --git a/src/TensorFlowNET.Core/Common/Types/NestNode.cs b/src/TensorFlowNET.Core/Common/Types/NestNode.cs new file mode 100644 index 000000000..1dad421d9 --- /dev/null +++ b/src/TensorFlowNET.Core/Common/Types/NestNode.cs @@ -0,0 +1,32 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Common.Types +{ + /// + /// A nested structure with only one element. + /// + /// + public class NestNode : INestStructure + { + public T Value { get; set; } + public NestNode(T value) + { + Value = value; + } + public IEnumerable Flatten() + { + yield return Value; + } + public INestStructure MapStructure(Func func) + { + return new NestNode(func(Value)); + } + + public Nest AsNest() + { + return new Nest(Value); + } + } +} diff --git a/src/TensorFlowNET.Core/Tensors/Tensors.cs b/src/TensorFlowNET.Core/Tensors/Tensors.cs index caa36b761..cba8f9541 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensors.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensors.cs @@ -3,6 +3,7 @@ using System.Collections; using System.Collections.Generic; using System.Linq; +using Tensorflow.Common.Types; namespace Tensorflow { @@ -13,16 +14,14 @@ namespace Tensorflow /// and Tensor[] from Tensors implicitily. /// It works for tuple and scalar as well. /// - public class Tensors : IEnumerable, IDisposable + public sealed class Tensors : Nest, IDisposable { - List items = new List(); - - public TF_DataType dtype => items.First().dtype; - public Shape shape => items.First().shape; - public int rank => items.First().rank; - public Graph graph => items.First().graph; + public TF_DataType dtype => this.First().dtype; + public Shape shape => this.First().shape; + public int rank => this.First().rank; + public Graph graph => this.First().graph; public bool IsList { get; set; } - public int Length => items.Count(); + public int Length => this.Count(); /// /// Return a Tensor if `Tensors` has only one tensor, otherwise throw an exception. /// @@ -35,7 +34,7 @@ public Tensor Single throw new ValueError("Tensors with more than one tensor cannot be " + "implicitly converted to Tensor."); } - return items.First(); + return this.First(); } } @@ -52,150 +51,194 @@ public Tensor? 
SingleOrNull throw new ValueError($"Tensors with {Length} tensor cannot be " + "implicitly converted to Tensor."); } - return items.FirstOrDefault(); + return this.FirstOrDefault(); } } - public Tensor this[int index] + public Tensor this[params string[] slices] + => this.First()[slices]; + + public Tensors(Tensor tensor) : base(tensor) { - get => items[index]; - set => items[index] = value; + } - public Tensor this[params string[] slices] - => items.First()[slices]; - public Tensors(params Tensor[] tensors) + private Tensors(Nest nested) : base(nested) { - items.AddRange(tensors); + } - public Tensors(IEnumerable tensors) + public Tensors(params Tensor[] tensors): base(tensors.Select(x => new Nest(x))) { - items.AddRange(tensors); + } - public Tensors(NDArray nd) + public Tensors(IEnumerable tensors): base(tensors.Select(x => new Nest(x))) { - items.Add(ops.convert_to_tensor(nd)); + } - public IEnumerator GetEnumerator() + public Tensors(NDArray nd): base(ops.convert_to_tensor(nd)) { - foreach (var tensor in items) - yield return tensor; + } + public bool IsSingle() + { + return Length == 1; + } + + public new Tensors MergeWith(Nest? other) + { + return FromNest(base.MergeWith(other)); + } + + [Obsolete("This method is not encouraged to be used. It may be removed in the future. If you do want to add " + + "a tensor to `Tensors`, creating a new instance with your newly added tensor is a better choice.")] public void Add(Tensor tensor) - => items.Add(tensor); + { + if(NestType == NestType.Dictionary) + { + throw new ValueError("Cannot add a tensor to dictionary type of nested tensors."); + } + else if(NestType == NestType.Node) + { + NestType = NestType.List; + ListValue = new() { new Nest(Value), new Nest(tensor) }; + Value = null; + } + else + { + ListValue.Add(new Nest(tensor)); + } + } + [Obsolete("This method is not encouraged to be used. It may be removed in the future. If you do want to add " + + "some tensors to `Tensors`, creating a new instance with your newly added tensors is a better choice.")] public void AddRange(IEnumerable tensors) - => items.AddRange(tensors); + { + if (NestType == NestType.Dictionary) + { + throw new ValueError("Cannot add a tensor to dictionary type of nested tensors."); + } + else if (NestType == NestType.Node) + { + NestType = NestType.List; + ListValue = new() { new Nest(Value) }; + ListValue.AddRange(tensors.Select(x => new Nest(x))); + Value = null; + } + else + { + ListValue.AddRange(tensors.Select(x => new Nest(x))); + } + } + [Obsolete("This method is not encouraged to be used. It may be removed in the future. 
If you do want to insert " + + "a tensor to `Tensors`, creating a new instance with your newly added tensor is a better choice.")] public void Insert(int index, Tensor tensor) - => items.Insert(index, tensor); - - IEnumerator IEnumerable.GetEnumerator() - => GetEnumerator(); + { + if (NestType == NestType.List) + { + ListValue.Insert(index, new Nest(tensor)); + } + else if(NestType == NestType.Node) + { + NestType = NestType.List; + ListValue = new() { new Nest(Value) }; + ListValue.Insert(index, new Nest(tensor)); + Value = null; + } + else + { + throw new ValueError("Cannot add a tensor to dictionary type of nested tensors."); + } + } public string[] StringData() { - EnsureSingleTensor(this, "nnumpy"); - return this[0].StringData(); + return Single.StringData(); } public string StringData(int index) { - EnsureSingleTensor(this, "nnumpy"); - return this[0].StringData(index); + return Single.StringData(index); } public NDArray numpy() { - EnsureSingleTensor(this, "nnumpy"); - return this[0].numpy(); + return Single.numpy(); } + [Obsolete] public T[] ToArray() where T: unmanaged { - EnsureSingleTensor(this, $"ToArray<{typeof(T)}>"); - return this[0].ToArray(); + return Single.ToArray(); } #region Explicit Conversions public unsafe static explicit operator bool(Tensors tensor) { - EnsureSingleTensor(tensor, "explicit conversion to bool"); - return (bool)tensor[0]; + return (bool)tensor.Single; } public unsafe static explicit operator sbyte(Tensors tensor) { - EnsureSingleTensor(tensor, "explicit conversion to sbyte"); - return (sbyte)tensor[0]; + return (sbyte)tensor.Single; } public unsafe static explicit operator byte(Tensors tensor) { - EnsureSingleTensor(tensor, "explicit conversion to byte"); - return (byte)tensor[0]; + return (byte)tensor.Single; } public unsafe static explicit operator ushort(Tensors tensor) { - EnsureSingleTensor(tensor, "explicit conversion to ushort"); - return (ushort)tensor[0]; + return (ushort)tensor.Single; } public unsafe static explicit operator short(Tensors tensor) { - EnsureSingleTensor(tensor, "explicit conversion to short"); - return (short)tensor[0]; + return (short)tensor.Single; } public unsafe static explicit operator int(Tensors tensor) { - EnsureSingleTensor(tensor, "explicit conversion to int"); - return (int)tensor[0]; + return (int)tensor.Single; } public unsafe static explicit operator uint(Tensors tensor) { - EnsureSingleTensor(tensor, "explicit conversion to uint"); - return (uint)tensor[0]; + return (uint)tensor.Single; } public unsafe static explicit operator long(Tensors tensor) { - EnsureSingleTensor(tensor, "explicit conversion to long"); - return (long)tensor[0]; + return (long)tensor.Single; } public unsafe static explicit operator ulong(Tensors tensor) { - EnsureSingleTensor(tensor, "explicit conversion to ulong"); - return (ulong)tensor[0]; + return (ulong)tensor.Single; } public unsafe static explicit operator float(Tensors tensor) { - EnsureSingleTensor(tensor, "explicit conversion to byte"); - return (byte)tensor[0]; + return (byte)tensor.Single; } public unsafe static explicit operator double(Tensors tensor) { - EnsureSingleTensor(tensor, "explicit conversion to double"); - return (double)tensor[0]; + return (double)tensor.Single; } public unsafe static explicit operator string(Tensors tensor) { - EnsureSingleTensor(tensor, "explicit conversion to string"); - return (string)tensor[0]; + return (string)tensor.Single; } public static explicit operator object[](Tensors tensors) - => tensors.items.ToArray(); + => 
tensors.Flatten().ToArray(); #endregion #region Implicit Conversions @@ -219,52 +262,40 @@ public static implicit operator Tensor(Tensors? tensors) => tensors?.SingleOrNull; public static implicit operator Tensor[](Tensors tensors) - => tensors.items.ToArray(); - + => tensors.Flatten().ToArray(); #endregion - public void Deconstruct(out Tensor a, out Tensors? b) + public static Tensors? FromNest(Nest nested) { - a = items[0]; - b = Length == 1? null : new Tensors(items.Skip(1)); + if(nested == Nest.Empty) + { + return null; + } + return new Tensors(nested); } - private static void EnsureSingleTensor(Tensors tensors, string methodnName) + public void Deconstruct(out Tensor a, out Tensors? b) { - if(tensors.Length == 0) - { - throw new ValueError($"Method `{methodnName}` of `Tensors` cannot be used when `Tensors` contains no Tensor."); - } - else if(tensors.Length > 1) - { - throw new ValueError($"Method `{methodnName}` of `Tensors` cannot be used when `Tensors` contains more than one Tensor."); - } + a = this.First(); + b = Length == 1? null : new Tensors(this.Skip(1)); } public override string ToString() { - if(items.Count == 1) + if(Length == 1) { - return items[0].ToString(); + return this.First().ToString(); } else { - StringBuilder sb = new StringBuilder(); - sb.Append($"Totally {items.Count} tensors, which are {string.Join(", ", items.Select(x => x.name))}\n[\n"); - for(int i = 0; i < items.Count; i++) - { - var tensor = items[i]; - sb.Append($"Tensor {i}({tensor.name}): {tensor.ToString()}\n"); - } - sb.Append("]\n"); - return sb.ToString(); + return $"Totally {Length} tensors: {base.ToString()}"; } } public void Dispose() { - foreach (var item in items) - item.Dispose(); + foreach (var tensor in this) + tensor.Dispose(); } } } diff --git a/src/TensorFlowNET.Core/Util/nest.py.cs b/src/TensorFlowNET.Core/Util/nest.py.cs index ab6f56b3e..3ba3ce78b 100644 --- a/src/TensorFlowNET.Core/Util/nest.py.cs +++ b/src/TensorFlowNET.Core/Util/nest.py.cs @@ -36,6 +36,7 @@ namespace Tensorflow.Util // (np.array([3, 4]), tf.constant([3, 4])))` // + [Obsolete] public static class nest { @@ -170,39 +171,6 @@ private static object _sequence_like(object instance, IEnumerable args) throw new TypeError("Type of sequence not supported (yet): " + instance.GetType()); } - public static bool is_nested(object obj) - { - // Refer to https://www.tensorflow.org/api_docs/python/tf/nest - //if (obj is IList || obj is IDictionary || obj is ITuple) - // return true; - if (obj is IList || obj is IDictionary) - return true; - - if (obj is NDArray || obj is Tensor || obj is string || obj.GetType().IsGenericType - || obj is ISet || obj is ISet || obj is ISet) - return false; - - if (obj.GetType().IsNested) return true; - // Check if the object is an IEnumerable - if (obj is IEnumerable) - { - // If it is, check if it is a nested structure - foreach (object item in (IEnumerable)obj) - { - if (is_nested(item)) - { - return true; - } - } - return true; - } - else - { - // If it is not, return false - return false; - } - } - /// /// Yields the next value from the given iterable. /// From 537b3e11428db323a1e9bf59e686fdf8c08e8eeb Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Wed, 7 Jun 2023 07:56:49 +0800 Subject: [PATCH 033/182] feat: support simple RNN. 
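
For reviewers: the snippet below is an illustrative sketch only and is not part of
this patch. It restates, with plain arrays instead of the library's Tensor types,
the per-time-step recurrence that the SimpleRNNCell changes further down implement,
h_t = activation(x_t * W + h_{t-1} * U + b). Every name in the sketch is made up
for this message, and tanh stands in for the configured activation.

    using System;

    static class SimpleRnnSketch
    {
        // One recurrence step of a simple RNN: the output also serves as the next state.
        public static float[] Step(float[] x, float[] hPrev,
            float[,] kernel, float[,] recurrentKernel, float[] bias)
        {
            int units = hPrev.Length;
            var h = new float[units];
            for (int j = 0; j < units; j++)
            {
                float acc = bias[j];
                for (int i = 0; i < x.Length; i++)
                    acc += x[i] * kernel[i, j];              // x_t * W
                for (int i = 0; i < units; i++)
                    acc += hPrev[i] * recurrentKernel[i, j]; // h_{t-1} * U
                h[j] = MathF.Tanh(acc);                      // stand-in for the configured activation
            }
            return h;
        }
    }

keras.backend.rnn, reworked below, is essentially the loop that feeds each time slice
of the input together with the previous state into such a step function and collects
the outputs and final states.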
--- .../Keras/Layers/Rnn/IRnnCell.cs | 2 +- .../Operations/NnOps/RNNCell.cs | 1 + .../Operations/_EagerTensorArray.cs | 117 ++- src/TensorFlowNET.Keras/BackendImpl.cs | 721 +++++++++--------- src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs | 24 +- .../Layers/Rnn/RnnCellBase.cs | 2 +- .../Layers/Rnn/SimpleRNNCell.cs | 50 +- .../Layers/Rnn/StackedRNNCells.cs | 1 + src/TensorflowNET.Hub/KerasLayer.cs | 3 +- 9 files changed, 507 insertions(+), 414 deletions(-) diff --git a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs index df6222cd0..d12ed1ad6 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs @@ -9,11 +9,11 @@ public interface IRnnCell: ILayer { GeneralizedTensorShape StateSize { get; } GeneralizedTensorShape OutputSize { get; } + bool IsTFRnnCell { get; } /// /// Whether the optional RNN args are supported when appying the layer. /// In other words, whether `Apply` is overwrited with process of `RnnOptionalArgs`. /// bool SupportOptionalArgs { get; } - (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? training = null); } } diff --git a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs index 71fdc301d..26646b76a 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs @@ -183,6 +183,7 @@ public void adapt(Tensor data, int? batch_size = null, int? steps = null) } public GeneralizedTensorShape StateSize => throw new NotImplementedException(); public GeneralizedTensorShape OutputSize => throw new NotImplementedException(); + public bool IsTFRnnCell => throw new NotImplementedException(); public bool SupportOptionalArgs => throw new NotImplementedException(); } } diff --git a/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs b/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs index cf1b50af6..ed65a08d7 100644 --- a/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs +++ b/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs @@ -17,6 +17,7 @@ limitations under the License. 
using System; using System.Collections.Generic; using System.Linq; +using Tensorflow.Eager; using Tensorflow.Framework; using static Tensorflow.Binding; @@ -48,6 +49,7 @@ public class _EagerTensorArray : TensorArray public override Tensor flow => _flow; bool _clear_after_read; List _tensor_array; + List _previous_read_indices; public _EagerTensorArray(TF_DataType dtype, Tensor size, bool dynamic_size = false, bool clear_after_read = true, string tensor_array_name = null, Tensor handle = null, Tensor flow = null, @@ -61,16 +63,20 @@ public _EagerTensorArray(TF_DataType dtype, Tensor size, bool dynamic_size = fal _dtype = dtype.as_base_dtype(); _dynamic_size = dynamic_size; _clear_after_read = clear_after_read; - _tensor_array = new List(); + _tensor_array = Enumerable.Repeat(null, size.numpy()).ToList(); + _previous_read_indices = new(); } public override TensorArray unstack(Tensor value, string name = null) { - return tf_with(ops.name_scope(name, "TensorArrayUnstack", new { _handle, value }), delegate + var tensors = array_ops.unstack(value, name: name); + if(tensors.Length > _tensor_array.Count && !_dynamic_size) { - var num_elements = array_ops.shape(value)[0]; - return scatter(indices: math_ops.range(0, num_elements), value: value, name: name); - }); + throw new ValueError($"Cannot unstack {tensors.Length} tensors into a TensorArray of static size {_tensor_array.Count}"); + } + _tensor_array = tensors.ToList(); + // TODO(Rinne): revise the implementation. Here we should return `parent()`. + return this; } public TensorArray scatter(Tensor indices, Tensor value, string name = null) @@ -116,9 +122,19 @@ public void _maybe_colocate_with(Tensor value) _colocate_with.Add(value); } + private Tensor _maybe_zero(int ix) + { + var val = _tensor_array[ix]; + if(val is null) + { + val = _tensor_array[ix] = array_ops.zeros(_element_shape, _dtype); + } + return val; + } + public override Tensor read(T index, string name = null) { - int index_int = -1; + int index_int; if (index is int int_index) index_int = int_index; else if (index is Tensor tensor_index) @@ -126,27 +142,75 @@ public override Tensor read(T index, string name = null) else throw new ValueError(""); + if(index_int >= _tensor_array.Count) + { + throw new OutOfRangeError($"Tried to read from index {index_int} but array size is: {_tensor_array.Count} "); + } + + var res = _tensor_array[index_int]; + if(res is null) + { + if (_previous_read_indices.Contains(index_int)) + { + throw new InvalidArgumentError($"Could not read index {index_int} twice because it was cleared after " + + $"a previous read (perhaps try setting clear_after_read = false?)"); + } + else + { + res = _maybe_zero(index_int); + } + } + if (_clear_after_read) { _tensor_array[index_int] = null; + _previous_read_indices.Add(index_int); } - - return _tensor_array[index_int]; + return res; } public override TensorArray write(Tensor index, Tensor value, string name = null) { - if (_infer_shape) - _element_shape = _element_shape.merge_with(value.shape); - _tensor_array.add(value); - return this; + int index_int; + if(index is EagerTensor eager) + { + return write(eager.numpy(), value, name); + } + throw new InvalidArgumentError("The index is supposed to be an EagerTensor"); } public override TensorArray write(int index, T value, string name = null) { - var value_tensor = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value"); - var index_tensor = ops.convert_to_tensor(index, name: "index"); - return write(index_tensor, value_tensor, name: name); + int size = 
_tensor_array.Count; + if(index >= size) + { + if (!_dynamic_size) + { + throw new OutOfRangeError($"Tried to write to index {index} but array is not resizeable and size " + + $"is: {size} "); + } + _tensor_array.AddRange(Enumerable.Repeat(null, index - size + 1)); + } + + Tensor tensor = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value"); + + if(_dtype != tensor.dtype) + { + throw new InvalidArgumentError($"TensorArray dtype is {_dtype.as_python_name()} but Op is " + + $"trying to write dtype {tensor.dtype.as_python_name()} "); + } + + if (!_element_shape.is_compatible_with(tensor.shape)) + { + throw new ValueError($"Incompatible shape for value ({tensor.shape}), expected ({_element_shape})"); + } + + if (_infer_shape) + { + _element_shape = _element_shape.merge_with(tensor.shape); + } + _tensor_array[index] = tensor; + return this; } private Tensor size(string name = null) @@ -156,11 +220,26 @@ private Tensor size(string name = null) public override Tensor stack(string name = null) { - ops.colocate_with(_handle); - return tf_with(ops.name_scope(name, "TensorArrayStack", new { _handle }), delegate + if(_tensor_array.Count > 0) { - return gather(math_ops.range(0, size()), name: name); - }); + for(int i = 0; i < _tensor_array.Count; i++) + { + _maybe_zero(i); + } + } + if(_tensor_array.Count == 0 && _element_shape.IsFullyDefined) + { + return ops.convert_to_tensor(new Shape(new long[] { 0 }.Concat(_element_shape.dims).ToArray()), name: name, dtype: _dtype); + } + else + { + return ops.convert_to_tensor(_tensor_array, name: name, dtype: _dtype); + } + //ops.colocate_with(_handle); + //return tf_with(ops.name_scope(name, "TensorArrayStack", new { _handle }), delegate + //{ + // return gather(math_ops.range(0, size()), name: name); + //}); } public override Tensor gather(Tensor indices, string name = null) diff --git a/src/TensorFlowNET.Keras/BackendImpl.cs b/src/TensorFlowNET.Keras/BackendImpl.cs index a7c1bcadf..30b73e82f 100644 --- a/src/TensorFlowNET.Keras/BackendImpl.cs +++ b/src/TensorFlowNET.Keras/BackendImpl.cs @@ -20,9 +20,11 @@ limitations under the License. 
using System.Collections.Generic; using Tensorflow.Functions; using Tensorflow.Graphs; +using Tensorflow.Common.Extensions; using static Tensorflow.Binding; using static Tensorflow.Graphs.SubGraphUtility; using Tensorflow.Util; +using Tensorflow.Common.Types; namespace Tensorflow.Keras { @@ -452,7 +454,7 @@ public Tensor conv2d_transpose(Tensor x, return x; } - public static (Tensors, Tensors, Tensors) rnn( + public (Tensors, Tensors, Tensors) rnn( Func step_function, // args:inputs, states, return:output, new_states Tensors inputs, // inputs is a tuple of tensors (one per input sequence) Tensors initial_states, @@ -466,7 +468,7 @@ public static (Tensors, Tensors, Tensors) rnn( bool return_all_outputs = true) { - Tensors swap_batch_timestep(Tensors input_t) + Tensor swap_batch_timestep(Tensor input_t) { var axes = Enumerable.Range(0, input_t.rank).ToArray(); axes[0] = 1; @@ -476,13 +478,14 @@ Tensors swap_batch_timestep(Tensors input_t) if (!time_major) { - inputs = nest.map_structure(swap_batch_timestep, inputs); + inputs = Nest.MapStructure(swap_batch_timestep, inputs).ToTensors(); } - var flatted_inptus = nest.flatten(inputs); - var time_steps = flatted_inptus[0].shape[0]; - var batch = flatted_inptus[0].shape[1]; - var time_step_t = tf.shape(flatted_inptus[0])[0]; + var flatted_inptus = Nest.Flatten(inputs).ToList(); + var first_flatted_input = flatted_inptus[0]; + var time_steps = first_flatted_input.shape[0]; + var batch = first_flatted_input.shape[1]; + var time_steps_t = (int)first_flatted_input.shape[0]; foreach (var input_ in flatted_inptus) { @@ -508,11 +511,6 @@ Tensors swap_batch_timestep(Tensors input_t) } - if (constants == null) - { - constants = new List(); - } - // tf.where needs its condition tensor to be the same shape as its two // result tensors, but in our case the condition (mask) tensor is // (nsamples, 1), and inputs are (nsamples, ndimensions) or even more. 
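            // Reviewer note, not part of this patch: the shape problem described in the
            // comment above, acted out with plain arrays. The per-sample mask has to be
            // reused across every feature column so that the condition matches the shape
            // of the two operands handed to tf.where; the _expand_mask helper is what
            // arranges that on tensors. Every name below is an illustrative assumption,
            // not library API.
            static float[,] WhereMaskedSketch(bool[] mask, float[,] current, float[,] previous)
            {
                int samples = mask.Length, features = current.GetLength(1);
                var result = new float[samples, features];
                for (int n = 0; n < samples; n++)
                    for (int d = 0; d < features; d++)
                        result[n, d] = mask[n] ? current[n, d] : previous[n, d];
                return result;
            }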
@@ -522,12 +520,12 @@ Tensors swap_batch_timestep(Tensors input_t) Tensors _expand_mask(Tensors mask_t, Tensors input_t, int fixed_dim = 1) { - if (nest.is_nested(mask_t)) + if (!mask_t.IsSingle()) { throw new ValueError($"mask_t is expected to be tensor, but got {mask_t}"); } - if (nest.is_nested(input_t)) + if (!input_t.IsSingle()) { throw new ValueError($"input_t is expected to be tensor, but got {input_t}"); } @@ -575,21 +573,21 @@ Tensors _expand_mask(Tensors mask_t, Tensors input_t, int fixed_dim = 1) - Tensors _process_single_input_t(Tensors input_t) + Tensors _process_single_input_t(Tensor input_t) { - input_t = tf.unstack(input_t); // unstack for time_step dim + var unstaked_input_t = array_ops.unstack(input_t); // unstack for time_step dim if (go_backwards) { - input_t.Reverse(); + unstaked_input_t = unstaked_input_t.Reverse().ToArray(); } - return input_t; + return unstaked_input_t; } // TODO(Wanglongzhi2001) Tensors processed_input; - if (nest.is_nested(inputs)) + if (!inputs.IsSingle()) { - processed_input = nest.map_structure(_process_single_input_t, inputs); + processed_input = inputs.MapStructure(_process_single_input_t).ReduceTo().ToTensors(); } else { @@ -603,334 +601,339 @@ object _get_input_tensor(int time) { inp.Add(t_[time]); } - return nest.pack_sequence_as(inputs, inp); + return Nest.PackSequenceAs(inputs, inp); } - //if (mask != null) - //{ - // var mask_list = tf.unstack(mask); - // if (go_backwards) - // { - // mask_list.Reverse(); - // } - - // for (int i = 0; i < time_steps; i++) - // { - // // TODO(Wanglongzhi2001),deal with _get_input_tensor - // var inp = _get_input_tensor(i); - // var mask_t = mask_list[i]; - // // TODO - // var (output, newStates) = step_function((Tensors)inp, new Tensors { states, constants }); - - // var tiled_mask_t = _expand_mask(mask_t, output); - - // Tensors prev_output; - // if (successive_outputs == null) - // { - // prev_output = tf.zeros_like(output); - // } - // else - // { - // prev_output = successive_outputs[successive_outputs.Length - 1]; - // } - - // output = tf.where(tiled_mask_t, output, prev_output); - - // //var flat_states = nest.flatten(states); - // //var flat_new_states = nest.flatten(newStates); - // var flat_states = states.ToList(); - // var flat_new_states = newStates.ToList(); - - // var tiledMaskT = flat_states - // .Select(s => _expand_mask(mask_t, s)) - // .ToArray(); - // var tuple = Tuple.Create(tiledMaskT); - - // List flat_final_states = new List(); - // foreach (var (m, s, ps) in Enumerable.Zip(tiled_mask_t, flat_new_states, flat_states)) - // { - // flat_final_states.Add(tf.where(m, s, ps)); - // } - - // states = (Tensors)nest.pack_sequence_as(states, flat_final_states); - // if (return_all_outputs) - // { - // successive_outputs.Add(output); - // successive_states.Add(states); - // } - // else - // { - // successive_outputs = new Tensors { output }; - // successive_states = new Tensors { states }; - // } - - // } - // last_output = successive_outputs[successive_outputs.Length - 1]; - // new_states = successive_states[successive_states.Length - 1]; - // outputs = tf.stack(successive_outputs); - - // if (zero_output_for_mask) - // { - // last_output = tf.where(_expand_mask(mask_list[mask_list.Length - 1], last_output), last_output, tf.zeros_like(last_output)); - // outputs = tf.where(_expand_mask(mask, outputs, fixed_dim: 2), outputs, tf.zeros_like(outputs)); - // } - // else // mask is null - // { - // for (int i = 0; i < time_steps; i++) - // { - // var inp = _get_input_tensor(i); - // var (output, 
newStates) = step_function((Tensors)inp, new Tensors { states, constants }); - // states = newStates; - - // if (return_all_outputs) - // { - // successive_outputs.Add(output); - // successive_states.Add(newStates); - // } - // else - // { - // successive_outputs = new Tensors { output }; - // successive_states = new Tensors { newStates }; - // } - // } - // last_output = successive_outputs[successive_outputs.Length - 1]; - // new_states = successive_states[successive_states.Length - 1]; - // outputs = tf.stack(successive_outputs); - // } - //} + if (mask != null) + { + var mask_list = tf.unstack(mask); + if (go_backwards) + { + mask_list.Reverse(); + } + + for (int i = 0; i < time_steps; i++) + { + // TODO(Wanglongzhi2001),deal with _get_input_tensor + var inp = _get_input_tensor(i); + var mask_t = mask_list[i]; + // TODO + var (output, newStates) = step_function((Tensors)inp, states.MergeWith(constants)); + + var tiled_mask_t = _expand_mask(mask_t, output); + + Tensors prev_output; + if (successive_outputs == null) + { + prev_output = tf.zeros_like(output); + } + else + { + prev_output = successive_outputs[successive_outputs.Length - 1]; + } + + output = tf.where(tiled_mask_t, output, prev_output); + + var flat_states = Nest.Flatten(states).ToList(); + var flat_new_states = Nest.Flatten(newStates).ToList(); + + var tiledMaskT = flat_states + .Select(s => _expand_mask(mask_t, s)) + .ToArray(); + var tuple = Tuple.Create(tiledMaskT); + + List flat_final_states = new List(); + foreach (var (m, s, ps) in zip(tiled_mask_t.ToList(), flat_new_states, flat_states)) + { + flat_final_states.Add(tf.where(m, s, ps)); + } + + states = Nest.PackSequenceAs(states, flat_final_states).ToTensors(); + if (return_all_outputs) + { + successive_outputs.Add(output); + successive_states.Add(states); + } + else + { + successive_outputs = new Tensors { output }; + successive_states = new Tensors { states }; + } + + } + last_output = successive_outputs[successive_outputs.Length - 1]; + new_states = successive_states[successive_states.Length - 1]; + outputs = tf.stack(successive_outputs); + + if (zero_output_for_mask) + { + last_output = tf.where(_expand_mask(mask_list[mask_list.Length - 1], last_output), last_output, tf.zeros_like(last_output)); + outputs = tf.where(_expand_mask(mask, outputs, fixed_dim: 2), outputs, tf.zeros_like(outputs)); + } + else // mask is null + { + for (int i = 0; i < time_steps; i++) + { + var inp = _get_input_tensor(i); + var (output, newStates) = step_function((Tensors)inp, states.MergeWith(constants)); + states = newStates; + + if (return_all_outputs) + { + successive_outputs.Add(output); + successive_states.Add(newStates); + } + else + { + successive_outputs = new Tensors { output }; + successive_states = new Tensors { newStates }; + } + } + last_output = successive_outputs[successive_outputs.Length - 1]; + new_states = successive_states[successive_states.Length - 1]; + outputs = tf.stack(successive_outputs); + } + } + } + else // unroll == false + { + var states = initial_states; + // Create input tensor array, if the inputs is nested tensors, then it + // will be flattened first, and tensor array will be created one per + // flattened tensor. 
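                // Reviewer note, not part of this patch: what unstacking along the time
                // dimension amounts to, shown with plain arrays instead of TensorArray.
                // A time-major [timeSteps, batch, features] input becomes timeSteps slices
                // of shape [batch, features]; the loop just below builds one TensorArray
                // per flattened input to hold exactly such slices. The helper here is an
                // illustrative sketch, not library code.
                static float[][,] UnstackTimeMajorSketch(float[,,] input)
                {
                    int timeSteps = input.GetLength(0), batch = input.GetLength(1), features = input.GetLength(2);
                    var slices = new float[timeSteps][,];
                    for (int t = 0; t < timeSteps; t++)
                    {
                        slices[t] = new float[batch, features];
                        for (int b = 0; b < batch; b++)
                            for (int f = 0; f < features; f++)
                                slices[t][b, f] = input[t, b, f];
                    }
                    return slices;
                }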
+ var input_ta = new List(); + for (int i = 0; i < flatted_inptus.Count; i++) + { + input_ta.Add(tf.TensorArray(dtype: flatted_inptus[i].dtype, size: time_steps_t)); + } + + foreach(var (ta, input_) in zip(input_ta, flatted_inptus)) + { + if (!go_backwards) + { + ta.unstack(input_); + } + else + { + ta.unstack(reverse(input_, 0)); + } + } + + // Get the time(0) input and compute the output for that, the output will + // be used to determine the dtype of output tensor array. Don't read from + // input_ta due to TensorArray clear_after_read default to True. + var inps = new Tensors(); + foreach (var inp in flatted_inptus) + { + inps.Add(inp[0]); + } + var input_time_zero = Nest.PackSequenceAs(inputs, inps).ToTensors(); + + // output_time_zero is used to determine the cell output shape and its + // dtype. the value is discarded. + (output_time_zero, _) = step_function((Tensor)input_time_zero, + constants is null ? initial_states : initial_states.MergeWith(constants)); + + int output_ta_size = return_all_outputs ? time_steps_t : 1; + var output_ta = new List(); + for (int i = 0; i < output_time_zero.ToList().Count; i++) + { + var Out = output_time_zero.ToList()[i]; + output_ta.Add(tf.TensorArray(dtype: Out.dtype, size: output_ta_size, element_shape: Out.shape)); + } + + var time = tf.constant(0, dtype: TF_DataType.TF_INT32, name: "time"); + + + + Func? masking_fn; + Func? compute_masked_output = null; + if (mask != null) + { + if (go_backwards) + { + mask = tf.reverse(mask, axis: new[] { 0 }); + } + var mask_ta = tf.TensorArray(dtype: TF_DataType.TF_BOOL, size: time_steps_t); + mask_ta = mask_ta.unstack(mask); + + masking_fn = (time) => + { + return mask_ta.read(time); + }; + + compute_masked_output = (mask_t, flat_out, flat_mask) => + { + var tiled_mask_t = new Tensors(); + foreach (var o in flat_out) + { + tiled_mask_t.Add(_expand_mask(mask_t, o, fixed_dim: mask_t.rank)); + } + + Tensors res = new Tensors(); + foreach (var (m, o, fm) in zip(tiled_mask_t.ToList(), flat_out.ToList(), flat_mask.ToList())) + { + res.Add(tf.where(m, o, fm)); + } + return res; + }; + } + // TODO(Wanglongzhi2001), what the input_length's type should be(an integer or a single tensor)? + else if (input_length is Tensor) + { + if (go_backwards) + { + var max_len = tf.reduce_max(input_length, axis: 0); + var rev_input_length = tf.subtract(max_len - 1, input_length); + + masking_fn = (time) => + { + return tf.less(rev_input_length, time); + }; + } + else + { + masking_fn = (time) => + { + return tf.greater(input_length, time); + }; + } + + compute_masked_output = (mask_t, flat_out, flat_mask) => + { + var res = new List(); + foreach (var (o, zo) in zip(flat_out, flat_mask)) + { + res.Add(tf.where(mask_t, o, zo)); + } + return res; + }; + } + else + { + masking_fn = null; + } + + Func cond = (time) => (time < time_steps_t); + int parallel_iterations = 32; + if (masking_fn != null) + { + // Mask for the T output will be base on the output of T - 1. In the + // case T = 0, a zero filled tensor will be used. + var flat_zero_output = new Tensors(); + foreach (var o in Nest.Flatten(output_time_zero)) + { + flat_zero_output.Add(tf.zeros_like(o)); + } + + var prev_output = flat_zero_output; + var output_ta_t = output_ta; + Tensor _step(Tensor time) + { + /* + RNN step function. + Args: + time: Current timestep value. + output_ta_t: TensorArray. + prev_output: tuple of outputs from time - 1. + *states: List of states. 
+ Returns: + Tuple(todo): `(time + 1, output_ta_t, output) + tuple(new_states)` + */ + + var flat_current_input = input_ta.Select(x => x.read(time)).ToList(); + // maybe set shape + // TODO(Wanglongzhi2001),deal with nest.pack_sequence_as's return type + var current_input = Nest.PackSequenceAs(inputs, flat_current_input).ToTensors(); + var mask_t = masking_fn(time); + var (output, new_states_internal) = step_function(current_input, states.MergeWith(constants)); + // mask output + var flat_output = Nest.Flatten(output).ToList(); + + var flat_mask_output = zero_output_for_mask ? flat_zero_output : prev_output.ToList(); + + // TODO(Wanglongzhi2001),deal with compute_masked_output's third parameter's type + var flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output); + + // mask states + var flat_state = states.ToList(); + var flat_new_state = new_states_internal.ToList(); + + foreach (var (state, new_state) in zip(flat_state, flat_new_state)) + { + if (new_state is Tensor) + { + new_state.shape = state.shape; + } + } + + var flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state); + new_states_internal = Nest.PackSequenceAs(new_states, flat_final_state).ToTensors(); + + var ta_index_to_write = return_all_outputs ? time : tf.constant(0); + // TODO(Wanglongzhi2001),deal with zip output_ta_t + foreach (var (ta, Out) in zip(output_ta_t, flat_new_output)) + { + output_ta_t.Add(ta.write(ta_index_to_write, Out)); + } + + new_states_internal = Nest.PackSequenceAs(initial_states, flat_new_state).ToTensors(); + + output_ta = output_ta_t; + new_states = new_states_internal; + return time + 1; + + } + var final_outputs = tf.while_loop(cond: cond, body: _step, loop_vars: time, parallel_iterations: parallel_iterations); + } + else + { + var output_ta_t = output_ta; + new_states = states; + Tensor _step(Tensor time) + { + var flat_current_input = input_ta.Select(x => x.read(time)).ToList(); + // maybe set shape + // TODO(Wanglongzhi2001),deal with nest.pack_sequence_as's return type + var current_input = Nest.PackSequenceAs(inputs, flat_current_input).ToTensors(); + var (output, new_states_internal) = step_function(current_input, new_states.MergeWith(constants)); + var flat_state = new_states.Flatten().ToList(); + var flat_new_state = new_states_internal.Flatten().ToList(); + foreach (var (state, new_state) in zip(flat_state, flat_new_state)) + { + if (new_state is Tensor) + { + new_state.shape = state.shape; + } + } + var flat_output = Nest.Flatten(output); + var ta_index_to_write = return_all_outputs ? 
time : tf.constant(0); + output_ta_t = zip(output_ta_t, flat_output).Select(item => + { + var (ta, out_) = item; + return ta.write(ta_index_to_write, out_); + }).ToList(); + + new_states_internal = Nest.PackSequenceAs(initial_states, flat_new_state).ToTensors(); + output_ta = output_ta_t; + new_states = new_states_internal; + return time + 1; + } + var final_outputs = tf.while_loop(cond: cond, body: _step, loop_vars: time, parallel_iterations: parallel_iterations); + } + //Tensors outputs = new Tensors(); + foreach (var o in output_ta) + { + outputs.Add(o.stack()); + } + foreach (var o in outputs) + { + last_output.Add(o[-1]); + } + outputs = Nest.PackSequenceAs(output_time_zero, outputs).ToTensors(); + last_output = Nest.PackSequenceAs(output_time_zero, last_output).ToTensors(); + } - //else // unroll == false - //{ - // var states = initial_states; - // // Create input tensor array, if the inputs is nested tensors, then it - // // will be flattened first, and tensor array will be created one per - // // flattened tensor. - // var input_ta = new List(); - // for (int i = 0; i < flatted_inptus.Count; i++) - // { - // input_ta.Add(tf.TensorArray(dtype: flatted_inptus[i].dtype, size: time_step_t)); - // } - - // // Get the time(0) input and compute the output for that, the output will - // // be used to determine the dtype of output tensor array. Don't read from - // // input_ta due to TensorArray clear_after_read default to True. - // var inps = new Tensors(); - // foreach (var inp in flatted_inptus) - // { - // inps.Add(inp[0]); - // } - // var input_time_zero = nest.pack_sequence_as(inputs, inps); - - // // output_time_zero is used to determine the cell output shape and its - // // dtype. the value is discarded. - // (output_time_zero, _) = step_function((Tensor)input_time_zero, new Tensors { initial_states, constants }); - - // var output_ta_size = return_all_outputs ? time_step_t : tf.constant(1); - // var output_ta = new List(); - // for (int i = 0; i < output_time_zero.ToList().Count; i++) - // { - // var Out = output_time_zero.ToList()[i]; - // output_ta.Add(tf.TensorArray(dtype: Out.dtype, size: output_ta_size, element_shape: Out.shape)); - // } - - // var time = tf.constant(0, dtype: TF_DataType.TF_INT32, name: "time"); - - - - // Func? masking_fn; - // Func? compute_masked_output = null; - // if (mask != null) - // { - // if (go_backwards) - // { - // mask = tf.reverse(mask, axis: new[] { 0 }); - // } - // var mask_ta = tf.TensorArray(dtype: TF_DataType.TF_BOOL, size: time_step_t); - // mask_ta = mask_ta.unstack(mask); - - // masking_fn = (time) => - // { - // return mask_ta.read(time); - // }; - - // compute_masked_output = (mask_t, flat_out, flat_mask) => - // { - // var tiled_mask_t = new Tensors(); - // foreach (var o in flat_out) - // { - // tiled_mask_t.Add(_expand_mask(mask_t, o, fixed_dim: mask_t.rank)); - // } - - // Tensors res = new Tensors(); - // foreach (var (m, o, fm) in Enumerable.Zip(tiled_mask_t, flat_out, flat_mask)) - // { - // res.Add(tf.where(m, o, fm)); - // } - // return res; - // }; - // } - // // TODO(Wanglongzhi2001), what the input_length's type should be(an integer or a single tensor)? 
- // else if (input_length is Tensor) - // { - // if (go_backwards) - // { - // var max_len = tf.reduce_max(input_length, axis: 0); - // var rev_input_length = tf.subtract(max_len - 1, input_length); - - // masking_fn = (time) => - // { - // return tf.less(rev_input_length, time); - // }; - // } - // else - // { - // masking_fn = (time) => - // { - // return tf.greater(input_length, time); - // }; - // } - - // compute_masked_output = (mask_t, flat_out, flat_mask) => - // { - // var res = new List(); - // foreach (var (o, zo) in zip(flat_out, flat_mask)) - // { - // res.Add(tf.where(mask_t, o, zo)); - // } - // return res; - // }; - // } - // else - // { - // masking_fn = null; - // } - - - // if (masking_fn != null) - // { - // // Mask for the T output will be base on the output of T - 1. In the - // // case T = 0, a zero filled tensor will be used. - // var flat_zero_output = new Tensors(); - // foreach (var o in nest.flatten(output_time_zero)) - // { - // flat_zero_output.Add(tf.zeros_like(o)); - // } - - - // (Tensor, List, Tensors, Tensors) _step(Tensor time, List output_ta_t, Tensors prev_output, Tensors states) - // { - // /* - // RNN step function. - // Args: - // time: Current timestep value. - // output_ta_t: TensorArray. - // prev_output: tuple of outputs from time - 1. - // *states: List of states. - // Returns: - // Tuple(todo): `(time + 1, output_ta_t, output) + tuple(new_states)` - // */ - - // var current_input = input_ta.Select(x => x.read(time)).ToList(); - // // maybe set shape - // // TODO(Wanglongzhi2001),deal with nest.pack_sequence_as's return type - // current_input = (List)nest.pack_sequence_as(inputs, current_input); - // var mask_t = masking_fn(time); - // var (output, new_states) = step_function(current_input, new Tensors { states, constants }); - // // mask output - // //var flat_output = nest.flatten(output); - // var flat_output = output.ToList(); - - // var flat_mask_output = zero_output_for_mask ? flat_zero_output : prev_output.ToList(); - - // // TODO(Wanglongzhi2001),deal with compute_masked_output's third parameter's type - // var flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output); - - // // mask states - // var flat_state = states.ToList(); - // var flat_new_state = new_states.ToList(); - - // foreach (var (state, new_state) in zip(flat_state, flat_new_state)) - // { - // if (new_state is Tensor) - // { - // new_state.set_shape(state.shape); - // } - // } - - // var flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state); - // new_states = (Tensors)nest.pack_sequence_as(new_states, flat_final_state); - - // var ta_index_to_write = return_all_outputs ? 
time : tf.constant(0); - // var Output_ta_t = new List(); - // // TODO(Wanglongzhi2001),deal with zip output_ta_t - // foreach (var (ta, Out) in zip(output_ta_t, flat_new_output)) - // { - // Output_ta_t.Add(ta.write(ta_index_to_write, Out)); - // } - - - - // //new_states = (Tensors)nest.pack_sequence_as(initial_states, flat_new_state); - - - // return (time + 1, Output_ta_t, flat_new_output, new_states); - - // } - // Func cond = (time) => (time < time_step_t); - - // var final_outputs = tf.while_loop(cond: cond, body: _step, loop_vars: (time, output_ta, flat_zero_output, states)); - // new_states = final_outputs.Item4; - // output_ta = final_outputs.Item2; - - // } - // else - // { - // (Tensor, List, Tensors) _step(Tensor time, List output_ta_t, Tensors states) - // { - // var current_input = input_ta.Select(x => x.read(time)).ToList(); - // // maybe set shape - // // TODO(Wanglongzhi2001),deal with nest.pack_sequence_as's return type - // current_input = (List)nest.pack_sequence_as(inputs, current_input); - // var (output, new_states) = step_function(current_input, new Tensors { states, constants }); - // var flat_state = states.ToList(); - // var flat_new_state = new_states.ToList(); - // foreach (var (state, new_state) in zip(flat_state, flat_new_state)) - // { - // if (new_state is Tensor) - // { - // new_state.set_shape(state.shape); - // } - // } - // var flat_output = output.ToList(); - // var ta_index_to_write = return_all_outputs ? time : tf.constant(0); - // var Output_ta_t = new List(); - // foreach (var (ta, out_) in zip(output_ta_t, flat_output)) - // { - // Output_ta_t.Add(ta.write(ta_index_to_write, out_)); - // } - - // new_states = (Tensors)nest.pack_sequence_as(initial_states, flat_new_state); - // return (time + 1, Output_ta_t, new_states); - // } - // Func cond = (time) => (time < time_step_t); - // var final_outputs = tf.while_loop(cond: cond, body: _step, loop_vars: (time, output_ta, states)); - // new_states = final_outputs.Item3; - // output_ta = final_outputs.Item2; - - // } - // //Tensors outputs = new Tensors(); - // foreach (var o in output_ta) - // { - // outputs.Add(o.stack()); - // } - // foreach (var o in outputs) - // { - // last_output.Add(o[-1]); - // } - // outputs = (Tensors)nest.pack_sequence_as(output_time_zero, outputs); - // last_output = (Tensors)nest.pack_sequence_as(output_time_zero, last_output); - - //} Func set_shape; set_shape = (output_) => @@ -947,18 +950,38 @@ object _get_input_tensor(int time) shape[0] = 1; } shape[1] = (int)batch; - output_.set_shape(new Tensor(shape)); + output_.shape = shape; } return output_; }; - var Outputs = (Tensors)nest.map_structure(set_shape, outputs); + outputs = Nest.MapStructure(set_shape, outputs).ToTensors(); if (!time_major) { - Outputs = nest.map_structure(swap_batch_timestep, outputs); + outputs = Nest.MapStructure(swap_batch_timestep, outputs).ToTensors(); + } + return (last_output, outputs, new_states); + + } + + public Tensor reverse(Tensor input, int axis) + { + return reverse(input, new int[] { axis }); + } + + public Tensor reverse(Tensor input, int[] axes) + { + return tf.reverse(input, axes); + } + + public Tensor maybe_convert_to_ragged(bool is_ragged_output, Tensor output, int nested_row_lengths, bool go_backwards = false) + { + if (!is_ragged_output) + { + return output; } - return (last_output, Outputs, new_states); + throw new NotImplementedException("Not implemented currently, please submit an issue to https://github.com/SciSharp/TensorFlow.NET/issues"); } } } diff --git 
a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs index b014737f6..ab4cef124 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs @@ -55,8 +55,8 @@ public Tensors States if (_states == null) { // CHECK(Rinne): check if this is correct. - var state = nest.map_structure(x => null, _cell.StateSize); - return new Tensors { state }; + var nested = _cell.StateSize.MapStructure(x => null); + _states = nested.AsNest().ToTensors(); } return _states; } @@ -230,7 +230,7 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo Tensors? mask = rnn_optional_args?.Mask; //var (inputs_padded, row_length) = BackendImpl.convert_inputs_if_ragged(inputs); // 暂时先不接受ragged tensor - int? row_length = null; + int row_length = 0; // TODO(Rinne): support this param. bool is_ragged_input = false; _validate_args_if_ragged(is_ragged_input, mask); @@ -249,16 +249,16 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo if (mask != null) { // Time step masks must be the same for each input. - mask = nest.flatten(mask)[0]; + mask = mask.Flatten().First(); } Shape input_shape; - if (nest.is_nested(inputs)) + if (!inputs.IsSingle()) { // In the case of nested input, use the first element for shape check // input_shape = nest.flatten(inputs)[0].shape; // TODO(Wanglongzhi2001) - input_shape = nest.flatten(inputs)[0].shape; + input_shape = inputs.Flatten().First().shape; } else { @@ -286,6 +286,7 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo // cell_call_fn = (self.cell.__call__ if callable(self.cell) else self.cell.call) Func step; + bool is_tf_rnn_cell = _cell.IsTFRnnCell; if (constants is not null) { if (!_cell.SupportOptionalArgs) @@ -299,7 +300,8 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo { constants = new Tensors(states.TakeLast(_num_constants)); states = new Tensors(states.SkipLast(_num_constants)); - var(output, new_states) = _cell.Apply(inputs, states, optional_args: new RnnOptionalArgs() { Constants = constants }); + states = len(states) == 1 && is_tf_rnn_cell ? new Tensors(states[0]) : states; + var (output, new_states) = _cell.Apply(inputs, states, optional_args: new RnnOptionalArgs() { Constants = constants }); // TODO(Wanglongzhi2001),should cell_call_fn's return value be Tensors, Tensors? return (output, new_states.Single); }; @@ -308,13 +310,13 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo { step = (inputs, states) => { - // states = (states[0] if len(states) == 1 and is_tf_rnn_cell else states) + states = len(states) == 1 && is_tf_rnn_cell ? 
new Tensors(states[0]) : states; var (output, new_states) = _cell.Apply(inputs, states); return (output, new_states.Single); }; } - var (last_output, outputs, states) = BackendImpl.rnn(step, + var (last_output, outputs, states) = keras.backend.rnn(step, inputs, initial_state, constants: constants, @@ -334,8 +336,8 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo Tensors output = new Tensors(); if (_args.ReturnSequences) { - throw new NotImplementedException("this argument havn't been developed."); - + // TODO(Rinne): add go_backwards parameter and revise the `row_length` param + output = keras.backend.maybe_convert_to_ragged(is_ragged_input, outputs, row_length, false); } else { diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RnnCellBase.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RnnCellBase.cs index fcb5d1ebf..751312e5d 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RnnCellBase.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RnnCellBase.cs @@ -14,8 +14,8 @@ public abstract class RnnCellBase: Layer, IRnnCell public RnnCellBase(LayerArgs args) : base(args) { } public abstract GeneralizedTensorShape StateSize { get; } public abstract GeneralizedTensorShape OutputSize { get; } + public abstract bool IsTFRnnCell { get; } public abstract bool SupportOptionalArgs { get; } - public abstract (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? training = null); public virtual Tensors GetInitialState(Tensors inputs, long batch_size, TF_DataType dtype) { return RnnUtils.generate_zero_filled_state_for_cell(this, inputs, batch_size, dtype); diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs index abb57d8ad..f0b2ed4d7 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs @@ -5,6 +5,7 @@ using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; using Tensorflow.Common.Types; +using Tensorflow.Common.Extensions; namespace Tensorflow.Keras.Layers.Rnn { @@ -26,6 +27,7 @@ public class SimpleRNNCell : DropoutRNNCellMixin public override GeneralizedTensorShape StateSize => _state_size; public override GeneralizedTensorShape OutputSize => _output_size; + public override bool IsTFRnnCell => true; public override bool SupportOptionalArgs => false; public SimpleRNNCell(SimpleRNNCellArgs args) : base(args) @@ -66,37 +68,22 @@ public override void build(KerasShapesWrapper input_shape) built = true; } - public override (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? training = null) + // TODO(Rinne): revise the trining param (with refactoring of the framework) + protected override Tensors Call(Tensors inputs, Tensors states = null, bool? training = null, IOptionalArgs? optional_args = null) { // TODO(Rinne): check if it will have multiple tensors when not nested. - Tensor prev_output = states[0]; + Tensors prev_output = Nest.IsNested(states) ? 
new Tensors(states[0]) : states; var dp_mask = get_dropout_maskcell_for_cell(inputs, training.Value); var rec_dp_mask = get_recurrent_dropout_maskcell_for_cell(prev_output, training.Value); Tensor h; - var ranks = inputs.rank; if (dp_mask != null) { - if (ranks > 2) - { - // 因为multiply函数会自动添加第一个维度,所以加上下标0 - h = tf.linalg.tensordot(math_ops.multiply(inputs, dp_mask)[0], _kernel.AsTensor(), new[,] { { ranks - 1 }, { 0 } }); - } - else - { - h = math_ops.matmul(math_ops.multiply(inputs, dp_mask)[0], _kernel.AsTensor()); - } + h = math_ops.matmul(math_ops.multiply(inputs.Single, dp_mask.Single), _kernel.AsTensor()); } else { - if (ranks > 2) - { - h = tf.linalg.tensordot(inputs, _kernel.AsTensor(), new[,] { { ranks - 1 }, { 0 } }); - } - else - { - h = math_ops.matmul(inputs, _kernel.AsTensor()); - } + h = math_ops.matmul(inputs, _kernel.AsTensor()); } if (_bias != null) @@ -106,26 +93,25 @@ public override (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? tra if (rec_dp_mask != null) { - prev_output = math_ops.multiply(prev_output, rec_dp_mask)[0]; + prev_output = math_ops.multiply(prev_output, rec_dp_mask); } - ranks = prev_output.rank; - Tensor output; - if (ranks > 2) + Tensor output = h + math_ops.matmul(prev_output, _recurrent_kernel.AsTensor()); + + if (_args.Activation != null) { - output = h + tf.linalg.tensordot(prev_output[0], _recurrent_kernel.AsTensor(), new[,] { { ranks - 1 }, { 0 } }); + output = _args.Activation.Apply(output); } - else + if (Nest.IsNested(states)) { - output = h + math_ops.matmul(prev_output, _recurrent_kernel.AsTensor()); + return new Nest(new List> { + new Nest(new List> { new Nest(output) }), new Nest(output) }) + .ToTensors(); } - Console.WriteLine($"shape of output: {output.shape}"); - - if (_args.Activation != null) + else { - output = _args.Activation.Apply(output); + return new Tensors(output, output); } - return (output, new Tensors { output }); } } } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs index 7923192fa..0b92fd3cf 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs @@ -170,6 +170,7 @@ public void from_config() } public GeneralizedTensorShape StateSize => throw new NotImplementedException(); public GeneralizedTensorShape OutputSize => throw new NotImplementedException(); + public bool IsTFRnnCell => throw new NotImplementedException(); public bool SupportOptionalArgs => throw new NotImplementedException(); } } diff --git a/src/TensorflowNET.Hub/KerasLayer.cs b/src/TensorflowNET.Hub/KerasLayer.cs index b9ca949bc..20d9851b1 100644 --- a/src/TensorflowNET.Hub/KerasLayer.cs +++ b/src/TensorflowNET.Hub/KerasLayer.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Linq; +using Tensorflow.Common.Types; using Tensorflow.Keras.Engine; using Tensorflow.Train; using Tensorflow.Training; @@ -89,7 +90,7 @@ private void _setup_layer(bool trainable = false) } } - protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null) + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optionalArgs = null) { _check_trainability(); From dcaa0f40d1f81b8e089f9ec77d85ca42e0933d80 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Wed, 7 Jun 2023 09:16:49 +0800 Subject: [PATCH 034/182] fix: some possible errors of RNN. 
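
This touches two places: the construction and in-place Add/AddRange paths of
`Tensors`, which did not yet cover the empty and single-element cases of the
Nest-based rewrite, and keras.backend.rnn, whose packing and merging of outputs
and states is adjusted accordingly. The snippet below is an illustrative usage
sketch only, not part of the diff, and the values in it are made up; it walks
through the state transitions this fix is meant to keep consistent (both methods
are marked obsolete in favour of constructing a new instance, but the transitions
still have to be right).

    // assumes `using Tensorflow;` and `using static Tensorflow.Binding;`
    var ts = new Tensors();                    // no tensors yet -> NestType.Empty
    ts.Add(tf.constant(1));                    // Empty -> a single Node
    ts.Add(tf.constant(2));                    // Node  -> List of two nodes
    ts.AddRange(new[] { tf.constant(3) });     // List grows in place
    var single = new Tensors(tf.constant(4));  // array constructor keeps one tensor as a Node, not a List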
--- src/TensorFlowNET.Core/Tensors/Tensors.cs | 41 +++++++++++++++++------ src/TensorFlowNET.Keras/BackendImpl.cs | 40 +++++++++------------- 2 files changed, 46 insertions(+), 35 deletions(-) diff --git a/src/TensorFlowNET.Core/Tensors/Tensors.cs b/src/TensorFlowNET.Core/Tensors/Tensors.cs index cba8f9541..259b1eec7 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensors.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensors.cs @@ -58,17 +58,12 @@ public Tensor? SingleOrNull public Tensor this[params string[] slices] => this.First()[slices]; - public Tensors(Tensor tensor) : base(tensor) - { - - } - private Tensors(Nest nested) : base(nested) { } - public Tensors(params Tensor[] tensors): base(tensors.Select(x => new Nest(x))) + public Tensors(params Tensor[] tensors): base(DealWithConstructorArrayInput(tensors)) { } @@ -83,6 +78,22 @@ public Tensors(NDArray nd): base(ops.convert_to_tensor(nd)) } + private static Nest DealWithConstructorArrayInput(Tensor[] tensors) + { + if (tensors.Length == 0) + { + return Nest.Empty; + } + else if(tensors.Length == 1) + { + return new Nest(tensors[0]); + } + else + { + return new Nest(tensors.Select(x => new Nest(x))); + } + } + public bool IsSingle() { return Length == 1; @@ -107,9 +118,14 @@ public void Add(Tensor tensor) ListValue = new() { new Nest(Value), new Nest(tensor) }; Value = null; } - else + else if(NestType == NestType.List) + { + ListValue!.Add(new Nest(tensor)); + } + else //Empty { - ListValue.Add(new Nest(tensor)); + NestType = NestType.Node; + Value = tensor; } } @@ -128,9 +144,14 @@ public void AddRange(IEnumerable tensors) ListValue.AddRange(tensors.Select(x => new Nest(x))); Value = null; } - else + else if(NestType == NestType.List) { - ListValue.AddRange(tensors.Select(x => new Nest(x))); + ListValue!.AddRange(tensors.Select(x => new Nest(x))); + } + else // empty + { + NestType = NestType.List; + ListValue = tensors.Select(x => new Nest(x)).ToList(); } } diff --git a/src/TensorFlowNET.Keras/BackendImpl.cs b/src/TensorFlowNET.Keras/BackendImpl.cs index 30b73e82f..144910669 100644 --- a/src/TensorFlowNET.Keras/BackendImpl.cs +++ b/src/TensorFlowNET.Keras/BackendImpl.cs @@ -651,13 +651,13 @@ object _get_input_tensor(int time) states = Nest.PackSequenceAs(states, flat_final_states).ToTensors(); if (return_all_outputs) { - successive_outputs.Add(output); - successive_states.Add(states); + successive_outputs = successive_outputs.MergeWith(output); + successive_outputs = successive_states.MergeWith(states); } else { - successive_outputs = new Tensors { output }; - successive_states = new Tensors { states }; + successive_outputs = new Tensors(output); + successive_states = new Tensors(states); } } @@ -722,16 +722,11 @@ object _get_input_tensor(int time) // Get the time(0) input and compute the output for that, the output will // be used to determine the dtype of output tensor array. Don't read from // input_ta due to TensorArray clear_after_read default to True. - var inps = new Tensors(); - foreach (var inp in flatted_inptus) - { - inps.Add(inp[0]); - } - var input_time_zero = Nest.PackSequenceAs(inputs, inps).ToTensors(); + var input_time_zero = Nest.PackSequenceAs(inputs, flatted_inptus.Select(x => x[0]).ToArray()).ToTensors(); // output_time_zero is used to determine the cell output shape and its // dtype. the value is discarded. - (output_time_zero, _) = step_function((Tensor)input_time_zero, + (output_time_zero, _) = step_function(input_time_zero, constants is null ? 
initial_states : initial_states.MergeWith(constants)); int output_ta_size = return_all_outputs ? time_steps_t : 1; @@ -816,6 +811,7 @@ object _get_input_tensor(int time) Func cond = (time) => (time < time_steps_t); int parallel_iterations = 32; + new_states = states; if (masking_fn != null) { // Mask for the T output will be base on the output of T - 1. In the @@ -846,7 +842,7 @@ RNN step function. // TODO(Wanglongzhi2001),deal with nest.pack_sequence_as's return type var current_input = Nest.PackSequenceAs(inputs, flat_current_input).ToTensors(); var mask_t = masking_fn(time); - var (output, new_states_internal) = step_function(current_input, states.MergeWith(constants)); + var (output, new_states_internal) = step_function(current_input, new_states.MergeWith(constants)); // mask output var flat_output = Nest.Flatten(output).ToList(); @@ -871,11 +867,12 @@ RNN step function. new_states_internal = Nest.PackSequenceAs(new_states, flat_final_state).ToTensors(); var ta_index_to_write = return_all_outputs ? time : tf.constant(0); - // TODO(Wanglongzhi2001),deal with zip output_ta_t - foreach (var (ta, Out) in zip(output_ta_t, flat_new_output)) + output_ta_t = zip(output_ta_t, flat_new_output).Select(item => { - output_ta_t.Add(ta.write(ta_index_to_write, Out)); - } + var (ta, out_) = item; + return ta.write(ta_index_to_write, out_); + }).ToList(); + new_states_internal = Nest.PackSequenceAs(initial_states, flat_new_state).ToTensors(); @@ -921,15 +918,8 @@ Tensor _step(Tensor time) } var final_outputs = tf.while_loop(cond: cond, body: _step, loop_vars: time, parallel_iterations: parallel_iterations); } - //Tensors outputs = new Tensors(); - foreach (var o in output_ta) - { - outputs.Add(o.stack()); - } - foreach (var o in outputs) - { - last_output.Add(o[-1]); - } + outputs = outputs.MergeWith(output_ta.Select(o => o.stack()).ToTensors()); + last_output = last_output.MergeWith(outputs.Select(o => o[-1]).ToTensors()); outputs = Nest.PackSequenceAs(output_time_zero, outputs).ToTensors(); last_output = Nest.PackSequenceAs(output_time_zero, last_output).ToTensors(); From db8e43b241cbc86a707bab7f0da5d4a0861820ec Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Mon, 12 Jun 2023 17:59:07 +0800 Subject: [PATCH 035/182] Add feature(not completed):add SimpleRNNCell, StackedRNNCell, RNN and test --- .../Common/Types/GeneralizedTensorShape.cs | 14 +- .../Keras/ArgsDefinition/Rnn/RNNArgs.cs | 3 + .../ArgsDefinition/Rnn/StackedRNNCellsArgs.cs | 3 +- .../Keras/Layers/ILayersApi.cs | 34 ++++ .../Operations/_EagerTensorArray.cs | 14 +- .../Operations/_GraphTensorArray.cs | 5 +- src/TensorFlowNET.Keras/BackendImpl.cs | 27 +-- src/TensorFlowNET.Keras/Layers/LayersApi.cs | 77 +++++++++ .../Layers/Rnn/DropoutRNNCellMixin.cs | 15 ++ src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs | 76 ++++++--- .../Layers/Rnn/SimpleRNNCell.cs | 10 +- .../Layers/Rnn/StackedRNNCells.cs | 159 +++++++++++------- .../Callbacks/EarlystoppingTest.cs | 25 ++- .../Layers/Rnn.Test.cs | 102 ++++++++++- 14 files changed, 445 insertions(+), 119 deletions(-) diff --git a/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs b/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs index e05d3deb3..c61d04b25 100644 --- a/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs +++ b/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs @@ -12,9 +12,14 @@ public class GeneralizedTensorShape: IEnumerable, INestStructure /// create a single-dim generalized Tensor shape. 
/// /// - public GeneralizedTensorShape(int dim) + public GeneralizedTensorShape(int dim, int size = 1) { - Shapes = new TensorShapeConfig[] { new TensorShapeConfig() { Items = new long?[] { dim } } }; + var elem = new TensorShapeConfig() { Items = new long?[] { dim } }; + Shapes = Enumerable.Repeat(elem, size).ToArray(); + //Shapes = new TensorShapeConfig[size]; + //Shapes.Initialize(new TensorShapeConfig() { Items = new long?[] { dim } }); + //Array.Initialize(Shapes, new TensorShapeConfig() { Items = new long?[] { dim } }); + ////Shapes = new TensorShapeConfig[] { new TensorShapeConfig() { Items = new long?[] { dim } } }; } public GeneralizedTensorShape(Shape shape) @@ -113,6 +118,11 @@ public INestStructure MapStructure(Func func) return new Nest(Shapes.Select(s => DealWithSingleShape(s))); } } + + + + public static implicit operator GeneralizedTensorShape(int dims) + => new GeneralizedTensorShape(dims); public IEnumerator GetEnumerator() { diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs index ed5a1d6dd..116ff7a2f 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs @@ -10,6 +10,9 @@ public class RNNArgs : AutoSerializeLayerArgs [JsonProperty("cell")] // TODO: the cell should be serialized with `serialize_keras_object`. public IRnnCell Cell { get; set; } = null; + [JsonProperty("cells")] + public IList Cells { get; set; } = null; + [JsonProperty("return_sequences")] public bool ReturnSequences { get; set; } = false; [JsonProperty("return_state")] diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs index fdfadab85..ea6f830b8 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs @@ -1,10 +1,11 @@ using System.Collections.Generic; +using Tensorflow.Keras.Layers.Rnn; namespace Tensorflow.Keras.ArgsDefinition.Rnn { public class StackedRNNCellsArgs : LayerArgs { - public IList Cells { get; set; } + public IList Cells { get; set; } public Dictionary Kwargs { get; set; } = null; } } diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs index 6a29f9e5e..3b2238164 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs @@ -1,5 +1,6 @@ using System; using Tensorflow.Framework.Models; +using Tensorflow.Keras.Layers.Rnn; using Tensorflow.NumPy; using static Google.Protobuf.Reflection.FieldDescriptorProto.Types; @@ -192,6 +193,19 @@ public ILayer Rescaling(float scale, float offset = 0, Shape input_shape = null); + public IRnnCell SimpleRNNCell( + int units, + string activation = "tanh", + bool use_bias = true, + string kernel_initializer = "glorot_uniform", + string recurrent_initializer = "orthogonal", + string bias_initializer = "zeros", + float dropout = 0f, + float recurrent_dropout = 0f); + + public IRnnCell StackedRNNCells( + IEnumerable cells); + public ILayer SimpleRNN(int units, string activation = "tanh", string kernel_initializer = "glorot_uniform", @@ -200,6 +214,26 @@ public ILayer SimpleRNN(int units, bool return_sequences = false, bool return_state = false); + public ILayer RNN( + IRnnCell cell, + bool return_sequences = false, + bool return_state = false, + bool go_backwards = false, + 
bool stateful = false, + bool unroll = false, + bool time_major = false + ); + + public ILayer RNN( + IEnumerable cell, + bool return_sequences = false, + bool return_state = false, + bool go_backwards = false, + bool stateful = false, + bool unroll = false, + bool time_major = false + ); + public ILayer Subtract(); } } diff --git a/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs b/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs index ed65a08d7..08e73fe67 100644 --- a/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs +++ b/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs @@ -109,7 +109,19 @@ public TensorArray scatter(Tensor indices, Tensor value, string name = null) return ta; });*/ - throw new NotImplementedException(""); + //if (indices is EagerTensor) + //{ + // indices = indices as EagerTensor; + // indices = indices.numpy(); + //} + + //foreach (var (index, val) in zip(indices.ToArray(), array_ops.unstack(value))) + //{ + // this.write(index, val); + //} + //return base; + //throw new NotImplementedException(""); + return this; } public void _merge_element_shape(Shape shape) diff --git a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs index 16870e9f6..dde2624af 100644 --- a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs +++ b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs @@ -17,6 +17,7 @@ limitations under the License. using System; using System.Collections.Generic; using System.Linq; +using Tensorflow.Eager; using static Tensorflow.Binding; namespace Tensorflow.Operations @@ -146,7 +147,9 @@ public TensorArray scatter(Tensor indices, Tensor value, string name = null) return ta; });*/ - throw new NotImplementedException(""); + + //throw new NotImplementedException(""); + return this; } public void _merge_element_shape(Shape shape) diff --git a/src/TensorFlowNET.Keras/BackendImpl.cs b/src/TensorFlowNET.Keras/BackendImpl.cs index 144910669..1336e9af5 100644 --- a/src/TensorFlowNET.Keras/BackendImpl.cs +++ b/src/TensorFlowNET.Keras/BackendImpl.cs @@ -510,7 +510,7 @@ Tensor swap_batch_timestep(Tensor input_t) } } - + // tf.where needs its condition tensor to be the same shape as its two // result tensors, but in our case the condition (mask) tensor is // (nsamples, 1), and inputs are (nsamples, ndimensions) or even more. @@ -535,7 +535,7 @@ Tensors _expand_mask(Tensors mask_t, Tensors input_t, int fixed_dim = 1) { mask_t = tf.expand_dims(mask_t, -1); } - var multiples = Enumerable.Repeat(1, fixed_dim).ToArray().concat(input_t.shape.as_int_list().ToList().GetRange(fixed_dim, input_t.rank)); + var multiples = Enumerable.Repeat(1, fixed_dim).ToArray().concat(input_t.shape.as_int_list().Skip(fixed_dim).ToArray()); return tf.tile(mask_t, multiples); } @@ -570,9 +570,6 @@ Tensors _expand_mask(Tensors mask_t, Tensors input_t, int fixed_dim = 1) // individually. 
The result of this will be a tuple of lists, each of // the item in tuple is list of the tensor with shape (batch, feature) - - - Tensors _process_single_input_t(Tensor input_t) { var unstaked_input_t = array_ops.unstack(input_t); // unstack for time_step dim @@ -609,7 +606,7 @@ object _get_input_tensor(int time) var mask_list = tf.unstack(mask); if (go_backwards) { - mask_list.Reverse(); + mask_list.Reverse().ToArray(); } for (int i = 0; i < time_steps; i++) @@ -629,9 +626,10 @@ object _get_input_tensor(int time) } else { - prev_output = successive_outputs[successive_outputs.Length - 1]; + prev_output = successive_outputs.Last(); } + // output could be a tensor output = tf.where(tiled_mask_t, output, prev_output); var flat_states = Nest.Flatten(states).ToList(); @@ -661,13 +659,13 @@ object _get_input_tensor(int time) } } - last_output = successive_outputs[successive_outputs.Length - 1]; - new_states = successive_states[successive_states.Length - 1]; + last_output = successive_outputs.Last(); + new_states = successive_states.Last(); outputs = tf.stack(successive_outputs); if (zero_output_for_mask) { - last_output = tf.where(_expand_mask(mask_list[mask_list.Length - 1], last_output), last_output, tf.zeros_like(last_output)); + last_output = tf.where(_expand_mask(mask_list.Last(), last_output), last_output, tf.zeros_like(last_output)); outputs = tf.where(_expand_mask(mask, outputs, fixed_dim: 2), outputs, tf.zeros_like(outputs)); } else // mask is null @@ -689,8 +687,8 @@ object _get_input_tensor(int time) successive_states = new Tensors { newStates }; } } - last_output = successive_outputs[successive_outputs.Length - 1]; - new_states = successive_states[successive_states.Length - 1]; + last_output = successive_outputs.Last(); + new_states = successive_states.Last(); outputs = tf.stack(successive_outputs); } } @@ -701,6 +699,8 @@ object _get_input_tensor(int time) // Create input tensor array, if the inputs is nested tensors, then it // will be flattened first, and tensor array will be created one per // flattened tensor. + + var input_ta = new List(); for (int i = 0; i < flatted_inptus.Count; i++) { @@ -719,6 +719,7 @@ object _get_input_tensor(int time) } } + // Get the time(0) input and compute the output for that, the output will // be used to determine the dtype of output tensor array. Don't read from // input_ta due to TensorArray clear_after_read default to True. @@ -773,7 +774,7 @@ object _get_input_tensor(int time) return res; }; } - // TODO(Wanglongzhi2001), what the input_length's type should be(an integer or a single tensor)? 
+ // TODO(Wanglongzhi2001), what the input_length's type should be(an integer or a single tensor), it could be an integer or tensor else if (input_length is Tensor) { if (go_backwards) diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index 3b095bc2a..dd25122d5 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -685,6 +685,34 @@ public ILayer LeakyReLU(float alpha = 0.3f) Alpha = alpha }); + + public IRnnCell SimpleRNNCell( + int units, + string activation = "tanh", + bool use_bias = true, + string kernel_initializer = "glorot_uniform", + string recurrent_initializer = "orthogonal", + string bias_initializer = "zeros", + float dropout = 0f, + float recurrent_dropout = 0f) + => new SimpleRNNCell(new SimpleRNNCellArgs + { + Units = units, + Activation = keras.activations.GetActivationFromName(activation), + UseBias = use_bias, + KernelInitializer = GetInitializerByName(kernel_initializer), + RecurrentInitializer = GetInitializerByName(recurrent_initializer), + Dropout = dropout, + RecurrentDropout = recurrent_dropout + }); + + public IRnnCell StackedRNNCells( + IEnumerable cells) + => new StackedRNNCells(new StackedRNNCellsArgs + { + Cells = cells.ToList() + }); + /// /// /// @@ -709,6 +737,55 @@ public ILayer SimpleRNN(int units, ReturnState = return_state }); + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public ILayer RNN( + IRnnCell cell, + bool return_sequences = false, + bool return_state = false, + bool go_backwards = false, + bool stateful = false, + bool unroll = false, + bool time_major = false) + => new RNN(new RNNArgs + { + Cell = cell, + ReturnSequences = return_sequences, + ReturnState = return_state, + GoBackwards = go_backwards, + Stateful = stateful, + Unroll = unroll, + TimeMajor = time_major + }); + + public ILayer RNN( + IEnumerable cell, + bool return_sequences = false, + bool return_state = false, + bool go_backwards = false, + bool stateful = false, + bool unroll = false, + bool time_major = false) + => new RNN(new RNNArgs + { + Cells = cell.ToList(), + ReturnSequences = return_sequences, + ReturnState = return_state, + GoBackwards = go_backwards, + Stateful = stateful, + Unroll = unroll, + TimeMajor = time_major + }); + /// /// Long Short-Term Memory layer - Hochreiter 1997. /// diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs index 21396853f..78d3dac96 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs @@ -17,6 +17,21 @@ public DropoutRNNCellMixin(LayerArgs args): base(args) } + protected void _create_non_trackable_mask_cache() + { + + } + + public void reset_dropout_mask() + { + + } + + public void reset_recurrent_dropout_mask() + { + + } + public Tensors? 
get_dropout_maskcell_for_cell(Tensors input, bool training, int count = 1) { if (dropout == 0f) diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs index ab4cef124..0ebd73628 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs @@ -38,7 +38,17 @@ public RNN(RNNArgs args) : base(PreConstruct(args)) SupportsMasking = true; // if is StackedRnncell - _cell = args.Cell; + if (args.Cells != null) + { + _cell = new StackedRNNCells(new StackedRNNCellsArgs + { + Cells = args.Cells + }); + } + else + { + _cell = args.Cell; + } // get input_shape _args = PreConstruct(args); @@ -122,6 +132,8 @@ private OneOf> compute_output_shape(Shape input_shape) var state_shape = new int[] { (int)batch }.concat(flat_state.as_int_list()); return new Shape(state_shape); }; + + var state_shape = _get_state_shape(state_size); return new List { output_shape, state_shape }; @@ -240,7 +252,7 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo if (_cell is StackedRNNCells) { var stack_cell = _cell as StackedRNNCells; - foreach (var cell in stack_cell.Cells) + foreach (IRnnCell cell in stack_cell.Cells) { _maybe_reset_cell_dropout_mask(cell); } @@ -253,7 +265,7 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo } Shape input_shape; - if (!inputs.IsSingle()) + if (!inputs.IsNested()) { // In the case of nested input, use the first element for shape check // input_shape = nest.flatten(inputs)[0].shape; @@ -267,7 +279,7 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo var timesteps = _args.TimeMajor ? input_shape[0] : input_shape[1]; - if (_args.Unroll && timesteps != null) + if (_args.Unroll && timesteps == null) { throw new ValueError( "Cannot unroll a RNN if the " + @@ -302,7 +314,6 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo states = new Tensors(states.SkipLast(_num_constants)); states = len(states) == 1 && is_tf_rnn_cell ? new Tensors(states[0]) : states; var (output, new_states) = _cell.Apply(inputs, states, optional_args: new RnnOptionalArgs() { Constants = constants }); - // TODO(Wanglongzhi2001),should cell_call_fn's return value be Tensors, Tensors? return (output, new_states.Single); }; } @@ -310,13 +321,14 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo { step = (inputs, states) => { - states = len(states) == 1 && is_tf_rnn_cell ? new Tensors(states[0]) : states; + states = len(states) == 1 && is_tf_rnn_cell ? 
new Tensors(states.First()) : states; var (output, new_states) = _cell.Apply(inputs, states); - return (output, new_states.Single); + return (output, new_states); }; } - - var (last_output, outputs, states) = keras.backend.rnn(step, + + var (last_output, outputs, states) = keras.backend.rnn( + step, inputs, initial_state, constants: constants, @@ -394,6 +406,7 @@ public override Tensors Apply(Tensors inputs, Tensors initial_states = null, boo initial_state = null; inputs = inputs[0]; } + if (_args.Stateful) { @@ -402,7 +415,7 @@ public override Tensors Apply(Tensors inputs, Tensors initial_states = null, boo var tmp = new Tensor[] { }; foreach (var s in nest.flatten(States)) { - tmp.add(tf.math.count_nonzero((Tensor)s)); + tmp.add(tf.math.count_nonzero(s.Single())); } var non_zero_count = tf.add_n(tmp); //initial_state = tf.cond(non_zero_count > 0, () => States, () => initial_state); @@ -415,6 +428,15 @@ public override Tensors Apply(Tensors inputs, Tensors initial_states = null, boo { initial_state = States; } + // TODO(Wanglongzhi2001), +// initial_state = tf.nest.map_structure( +//# When the layer has a inferred dtype, use the dtype from the +//# cell. +// lambda v: tf.cast( +// v, self.compute_dtype or self.cell.compute_dtype +// ), +// initial_state, +// ) } else if (initial_state is null) @@ -424,10 +446,9 @@ public override Tensors Apply(Tensors inputs, Tensors initial_states = null, boo if (initial_state.Length != States.Length) { - throw new ValueError( - $"Layer {this} expects {States.Length} state(s), " + - $"but it received {initial_state.Length} " + - $"initial state(s). Input received: {inputs}"); + throw new ValueError($"Layer {this} expects {States.Length} state(s), " + + $"but it received {initial_state.Length} " + + $"initial state(s). Input received: {inputs}"); } return (inputs, initial_state, constants); @@ -458,11 +479,11 @@ private void _validate_args_if_ragged(bool is_ragged_input, Tensors mask) void _maybe_reset_cell_dropout_mask(ILayer cell) { - //if (cell is DropoutRNNCellMixin) - //{ - // cell.reset_dropout_mask(); - // cell.reset_recurrent_dropout_mask(); - //} + if (cell is DropoutRNNCellMixin CellDRCMixin) + { + CellDRCMixin.reset_dropout_mask(); + CellDRCMixin.reset_recurrent_dropout_mask(); + } } private static RNNArgs PreConstruct(RNNArgs args) @@ -537,15 +558,24 @@ public Tensors __call__(Tensors inputs, Tensor state = null, Tensor training = n protected Tensors get_initial_state(Tensors inputs) { + var get_initial_state_fn = _cell.GetType().GetMethod("get_initial_state"); + var input = inputs[0]; - var input_shape = input.shape; + var input_shape = inputs.shape; var batch_size = _args.TimeMajor ? 
input_shape[1] : input_shape[0]; var dtype = input.dtype; - Tensors init_state; - if (_cell is RnnCellBase rnn_base_cell) + + Tensors init_state = new Tensors(); + + if(get_initial_state_fn != null) { - init_state = rnn_base_cell.GetInitialState(null, batch_size, dtype); + init_state = (Tensors)get_initial_state_fn.Invoke(_cell, new object[] { inputs, batch_size, dtype }); + } + //if (_cell is RnnCellBase rnn_base_cell) + //{ + // init_state = rnn_base_cell.GetInitialState(null, batch_size, dtype); + //} else { init_state = RnnUtils.generate_zero_filled_state(batch_size, _cell.StateSize, dtype); diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs index f0b2ed4d7..39610ff52 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs @@ -6,6 +6,7 @@ using Tensorflow.Keras.Saving; using Tensorflow.Common.Types; using Tensorflow.Common.Extensions; +using Tensorflow.Keras.Utils; namespace Tensorflow.Keras.Layers.Rnn { @@ -77,8 +78,10 @@ protected override Tensors Call(Tensors inputs, Tensors states = null, bool? tra var rec_dp_mask = get_recurrent_dropout_maskcell_for_cell(prev_output, training.Value); Tensor h; + var ranks = inputs.rank; if (dp_mask != null) { + h = math_ops.matmul(math_ops.multiply(inputs.Single, dp_mask.Single), _kernel.AsTensor()); } else @@ -95,7 +98,7 @@ protected override Tensors Call(Tensors inputs, Tensors states = null, bool? tra { prev_output = math_ops.multiply(prev_output, rec_dp_mask); } - + var tmp = _recurrent_kernel.AsTensor(); Tensor output = h + math_ops.matmul(prev_output, _recurrent_kernel.AsTensor()); if (_args.Activation != null) @@ -113,5 +116,10 @@ protected override Tensors Call(Tensors inputs, Tensors states = null, bool? tra return new Tensors(output, output); } } + + public Tensors get_initial_state(Tensors inputs = null, long? batch_size = null, TF_DataType? dtype = null) + { + return RnnUtils.generate_zero_filled_state_for_cell(this, inputs, batch_size.Value, dtype.Value); + } } } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs index 0b92fd3cf..56634853d 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs @@ -1,17 +1,20 @@ using System; using System.Collections.Generic; using System.ComponentModel; +using System.Linq; +using Tensorflow.Common.Extensions; using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; +using Tensorflow.Keras.Utils; namespace Tensorflow.Keras.Layers.Rnn { public class StackedRNNCells : Layer, IRnnCell { - public IList Cells { get; set; } + public IList Cells { get; set; } public bool reverse_state_order; public StackedRNNCells(StackedRNNCellsArgs args) : base(args) @@ -20,8 +23,19 @@ public StackedRNNCells(StackedRNNCellsArgs args) : base(args) { args.Kwargs = new Dictionary(); } - + foreach (var cell in args.Cells) + { + //Type type = cell.GetType(); + //var CallMethodInfo = type.GetMethod("Call"); + //if (CallMethodInfo == null) + //{ + // throw new ValueError( + // "All cells must have a `Call` method. 
" + + // $"Received cell without a `Call` method: {cell}"); + //} + } Cells = args.Cells; + reverse_state_order = (bool)args.Kwargs.Get("reverse_state_order", false); if (reverse_state_order) @@ -33,91 +47,112 @@ public StackedRNNCells(StackedRNNCellsArgs args) : base(args) } } - public object state_size + public GeneralizedTensorShape StateSize { - get => throw new NotImplementedException(); - //@property - //def state_size(self) : - // return tuple(c.state_size for c in - // (self.cells[::- 1] if self.reverse_state_order else self.cells)) + get + { + GeneralizedTensorShape state_size = new GeneralizedTensorShape(1, Cells.Count); + if (reverse_state_order && Cells.Count > 0) + { + var idxAndCell = Cells.Reverse().Select((cell, idx) => (idx, cell)); + foreach (var cell in idxAndCell) + { + state_size.Shapes[cell.idx] = cell.cell.StateSize.Shapes.First(); + } + } + else + { + //foreach (var cell in Cells) + //{ + // state_size.Shapes.add(cell.StateSize.Shapes.First()); + + //} + var idxAndCell = Cells.Select((cell, idx) => (idx, cell)); + foreach (var cell in idxAndCell) + { + state_size.Shapes[cell.idx] = cell.cell.StateSize.Shapes.First(); + } + } + return state_size; + } } public object output_size { get { - var lastCell = Cells[Cells.Count - 1]; - - if (lastCell.output_size != -1) + var lastCell = Cells.LastOrDefault(); + if (lastCell.OutputSize.ToSingleShape() != -1) { - return lastCell.output_size; + return lastCell.OutputSize; } else if (RNN.is_multiple_state(lastCell.StateSize)) { - // return ((dynamic)Cells[-1].state_size)[0]; - throw new NotImplementedException(""); + return lastCell.StateSize.First(); + //throw new NotImplementedException(""); } else { - return Cells[-1].state_size; + return lastCell.StateSize; } } } - public object get_initial_state() + public Tensors get_initial_state(Tensors inputs = null, long? batch_size = null, TF_DataType? dtype = null) { - throw new NotImplementedException(); - // def get_initial_state(self, inputs= None, batch_size= None, dtype= None) : - // initial_states = [] - // for cell in self.cells[::- 1] if self.reverse_state_order else self.cells: - // get_initial_state_fn = getattr(cell, 'get_initial_state', None) - // if get_initial_state_fn: - // initial_states.append(get_initial_state_fn( - // inputs=inputs, batch_size=batch_size, dtype=dtype)) - // else: - // initial_states.append(_generate_zero_filled_state_for_cell( - // cell, inputs, batch_size, dtype)) - - // return tuple(initial_states) + var cells = reverse_state_order ? Cells.Reverse() : Cells; + Tensors initial_states = new Tensors(); + foreach (var cell in cells) + { + var get_initial_state_fn = cell.GetType().GetMethod("get_initial_state"); + if (get_initial_state_fn != null) + { + var result = (Tensors)get_initial_state_fn.Invoke(cell, new object[] { inputs, batch_size, dtype }); + initial_states.Add(result); + } + else + { + initial_states.Add(RnnUtils.generate_zero_filled_state_for_cell(cell, inputs, batch_size.Value, dtype.Value)); + } + } + return initial_states; } - public object call() + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { - throw new NotImplementedException(); - // def call(self, inputs, states, constants= None, training= None, ** kwargs): - // # Recover per-cell states. 
- // state_size = (self.state_size[::- 1] - // if self.reverse_state_order else self.state_size) - // nested_states = nest.pack_sequence_as(state_size, nest.flatten(states)) - - // # Call the cells in order and store the returned states. - // new_nested_states = [] - // for cell, states in zip(self.cells, nested_states) : - // states = states if nest.is_nested(states) else [states] - //# TF cell does not wrap the state into list when there is only one state. - // is_tf_rnn_cell = getattr(cell, '_is_tf_rnn_cell', None) is not None - // states = states[0] if len(states) == 1 and is_tf_rnn_cell else states - // if generic_utils.has_arg(cell.call, 'training'): - // kwargs['training'] = training - // else: - // kwargs.pop('training', None) - // # Use the __call__ function for callable objects, eg layers, so that it - // # will have the proper name scopes for the ops, etc. - // cell_call_fn = cell.__call__ if callable(cell) else cell.call - // if generic_utils.has_arg(cell.call, 'constants'): - // inputs, states = cell_call_fn(inputs, states, - // constants= constants, ** kwargs) - // else: - // inputs, states = cell_call_fn(inputs, states, ** kwargs) - // new_nested_states.append(states) + // Recover per-cell states. + var state_size = reverse_state_order ? StateSize.Reverse() : StateSize; + var nested_states = reverse_state_order ? state.Flatten().Reverse() : state.Flatten(); - // return inputs, nest.pack_sequence_as(state_size, - // nest.flatten(new_nested_states)) + + var new_nest_states = new Tensors(); + // Call the cells in order and store the returned states. + foreach (var (cell, states) in zip(Cells, nested_states)) + { + // states = states if tf.nest.is_nested(states) else [states] + var type = cell.GetType(); + bool IsTFRnnCell = type.GetProperty("IsTFRnnCell") != null; + state = len(state) == 1 && IsTFRnnCell ? state.FirstOrDefault() : state; + + RnnOptionalArgs? rnn_optional_args = optional_args as RnnOptionalArgs; + Tensors? constants = rnn_optional_args?.Constants; + + Tensors new_states; + (inputs, new_states) = cell.Apply(inputs, states, optional_args: new RnnOptionalArgs() { Constants = constants }); + + new_nest_states.Add(new_states); + } + new_nest_states = reverse_state_order ? 
new_nest_states.Reverse().ToArray() : new_nest_states.ToArray(); + return new Nest(new List> { + new Nest(new List> { new Nest(inputs.Single()) }), new Nest(new_nest_states) }) + .ToTensors(); } + + public void build() { - throw new NotImplementedException(); + built = true; // @tf_utils.shape_type_conversion // def build(self, input_shape) : // if isinstance(input_shape, list) : @@ -168,9 +203,9 @@ public void from_config() { throw new NotImplementedException(); } - public GeneralizedTensorShape StateSize => throw new NotImplementedException(); + public GeneralizedTensorShape OutputSize => throw new NotImplementedException(); - public bool IsTFRnnCell => throw new NotImplementedException(); + public bool IsTFRnnCell => true; public bool SupportOptionalArgs => throw new NotImplementedException(); } } diff --git a/test/TensorFlowNET.Keras.UnitTest/Callbacks/EarlystoppingTest.cs b/test/TensorFlowNET.Keras.UnitTest/Callbacks/EarlystoppingTest.cs index ac5ba15ed..29648790f 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Callbacks/EarlystoppingTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Callbacks/EarlystoppingTest.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using Tensorflow.Keras.Callbacks; using Tensorflow.Keras.Engine; +using Tensorflow.NumPy; using static Tensorflow.KerasApi; @@ -18,7 +19,7 @@ public void Earlystopping() var layers = keras.layers; var model = keras.Sequential(new List { - layers.Rescaling(1.0f / 255, input_shape: (32, 32, 3)), + layers.Rescaling(1.0f / 255, input_shape: (28, 28, 1)), layers.Conv2D(32, 3, padding: "same", activation: keras.activations.Relu), layers.MaxPooling2D(), layers.Flatten(), @@ -36,8 +37,20 @@ public void Earlystopping() var num_epochs = 3; var batch_size = 8; - var ((x_train, y_train), (x_test, y_test)) = keras.datasets.cifar10.load_data(); - x_train = x_train / 255.0f; + var data_loader = new MnistModelLoader(); + + var dataset = data_loader.LoadAsync(new ModelLoadSetting + { + TrainDir = "mnist", + OneHot = false, + ValidationSize = 59900, + }).Result; + + NDArray x1 = np.reshape(dataset.Train.Data, (dataset.Train.Data.shape[0], 28, 28, 1)); + NDArray x2 = x1; + + var x = new NDArray[] { x1, x2 }; + // define a CallbackParams first, the parameters you pass al least contain Model and Epochs. CallbackParams callback_parameters = new CallbackParams { @@ -47,10 +60,8 @@ public void Earlystopping() // define your earlystop ICallback earlystop = new EarlyStopping(callback_parameters, "accuracy"); // define a callbcaklist, then add the earlystopping to it. 
- var callbacks = new List(); - callbacks.add(earlystop); - - model.fit(x_train[new Slice(0, 2000)], y_train[new Slice(0, 2000)], batch_size, num_epochs, callbacks: callbacks); + var callbacks = new List{ earlystop}; + model.fit(x, dataset.Train.Labels, batch_size, num_epochs, callbacks: callbacks); } } diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs index 55663d41c..28a16ad4e 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs @@ -4,25 +4,111 @@ using System.Linq; using System.Text; using System.Threading.Tasks; +using Tensorflow.Common.Types; +using Tensorflow.Keras.Engine; +using Tensorflow.Keras.Layers.Rnn; +using Tensorflow.Keras.Saving; using Tensorflow.NumPy; +using Tensorflow.Train; using static Tensorflow.Binding; +using static Tensorflow.KerasApi; namespace Tensorflow.Keras.UnitTest.Layers { [TestClass] public class Rnn { + [TestMethod] + public void SimpleRNNCell() + { + //var cell = tf.keras.layers.SimpleRNNCell(64, dropout: 0.5f, recurrent_dropout: 0.5f); + //var h0 = new Tensors { tf.zeros(new Shape(4, 64)) }; + //var x = tf.random.normal((4, 100)); + //var (y, h1) = cell.Apply(inputs: x, states: h0); + //var h2 = h1; + //Assert.AreEqual((4, 64), y.shape); + //Assert.AreEqual((4, 64), h2[0].shape); + + //var model = keras.Sequential(new List + //{ + // keras.layers.InputLayer(input_shape: (4,100)), + // keras.layers.SimpleRNNCell(64) + //}); + //model.summary(); + + var cell = tf.keras.layers.SimpleRNNCell(64, dropout: 0.5f, recurrent_dropout: 0.5f); + var h0 = new Tensors { tf.zeros(new Shape(4, 64)) }; + var x = tf.random.normal((4, 100)); + var (y, h1) = cell.Apply(inputs: x, states: h0); + var h2 = h1; + Assert.AreEqual((4, 64), y.shape); + Assert.AreEqual((4, 64), h2[0].shape); + } + + [TestMethod] + public void StackedRNNCell() + { + var inputs = tf.ones((32, 10)); + var states = new Tensors { tf.zeros((32, 4)), tf.zeros((32, 5)) }; + var cells = new IRnnCell[] { tf.keras.layers.SimpleRNNCell(4), tf.keras.layers.SimpleRNNCell(5) }; + var stackedRNNCell = tf.keras.layers.StackedRNNCells(cells); + var (output, state) = stackedRNNCell.Apply(inputs, states); + Console.WriteLine(output); + Console.WriteLine(state.shape); + Assert.AreEqual((32, 5), output.shape); + Assert.AreEqual((32, 4), state[0].shape); + } + [TestMethod] public void SimpleRNN() { - var inputs = np.arange(6 * 10 * 8).reshape((6, 10, 8)).astype(np.float32); - /*var simple_rnn = keras.layers.SimpleRNN(4); - var output = simple_rnn.Apply(inputs); - Assert.AreEqual((32, 4), output.shape);*/ - var simple_rnn = tf.keras.layers.SimpleRNN(4, return_sequences: true, return_state: true); - var (whole_sequence_output, final_state) = simple_rnn.Apply(inputs); - Console.WriteLine(whole_sequence_output); - Console.WriteLine(final_state); + //var inputs = np.arange(6 * 10 * 8).reshape((6, 10, 8)).astype(np.float32); + ///*var simple_rnn = keras.layers.SimpleRNN(4); + //var output = simple_rnn.Apply(inputs); + //Assert.AreEqual((32, 4), output.shape);*/ + + //var simple_rnn = tf.keras.layers.SimpleRNN(4, return_sequences: true, return_state: true); + //var (whole_sequence_output, final_state) = simple_rnn.Apply(inputs); + //Assert.AreEqual((6, 10, 4), whole_sequence_output.shape); + //Assert.AreEqual((6, 4), final_state.shape); + + var inputs = keras.Input(shape: (10, 8)); + var x = keras.layers.SimpleRNN(4).Apply(inputs); + var output = keras.layers.Dense(10).Apply(x); + var 
model = keras.Model(inputs, output); + model.summary(); + } + [TestMethod] + public void RNNForSimpleRNNCell() + { + var inputs = tf.random.normal((32, 10, 8)); + var cell = tf.keras.layers.SimpleRNNCell(10, dropout: 0.5f, recurrent_dropout: 0.5f); + var rnn = tf.keras.layers.RNN(cell: cell); + var output = rnn.Apply(inputs); + Assert.AreEqual((32, 10), output.shape); + } + [TestMethod] + public void RNNForStackedRNNCell() + { + var inputs = tf.random.normal((32, 10, 8)); + var cells = new IRnnCell[] { tf.keras.layers.SimpleRNNCell(4), tf.keras.layers.SimpleRNNCell(5) }; + var stackedRNNCell = tf.keras.layers.StackedRNNCells(cells); + var rnn = tf.keras.layers.RNN(cell: stackedRNNCell); + var output = rnn.Apply(inputs); + Assert.AreEqual((32, 5), output.shape); + } + + [TestMethod] + public void WlzTest() + { + long[] b = { 1, 2, 3 }; + + Shape a = new Shape(Unknown).concatenate(b); + Console.WriteLine(a); + + } + + } } From f1fbcf20166fa1902e399998aaf1c738493f9785 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Fri, 16 Jun 2023 14:30:54 +0800 Subject: [PATCH 036/182] feat: support model building with RNN. --- src/TensorFlowNET.Core/APIs/c_api.cs | 14 + .../APIs/tf.control_flow.cs | 10 +- .../Common/Extensions/LinqExtensions.cs | 7 +- .../Common/Types/FakeTensorByTensorArray.cs | 20 + .../Common/Types/GeneralizedTensorShape.cs | 140 +- .../Types/{INest.cs => INestStructure.cs} | 13 + .../Common/Types/Nest.Static.cs | 2 +- src/TensorFlowNET.Core/Common/Types/Nest.cs | 117 +- .../Common/Types/NestDictionary.cs | 4 + .../Common/Types/NestList.cs | 17 +- .../Common/Types/NestNode.cs | 4 + src/TensorFlowNET.Core/Data/DatasetV2.cs | 4 +- .../Eager/EagerRunner.TFE_FastPathExecute.cs | 2 + .../Framework/Models/TensorSpec.cs | 13 + .../Framework/auto_control_deps_utils.cs | 89 ++ .../Framework/function_def_lib.cs | 4 +- .../Functions/ConcreteFunction.cs | 13 + src/TensorFlowNET.Core/Graphs/FuncGraph.cs | 4 +- src/TensorFlowNET.Core/Graphs/Graph.cs | 2 +- .../Keras/Layers/Rnn/IRnnCell.cs | 12 +- .../Operations/NnOps/RNNCell.cs | 4 + .../Operations/OpDefLibrary.cs | 49 + .../Operations/Operation.Output.cs | 2 +- .../Operations/Operation.cs | 5 +- .../Operations/_EagerTensorArray.cs | 6 +- .../Operations/_GraphTensorArray.cs | 179 ++- .../Operations/array_ops.cs | 24 + .../Operations/control_flow_ops.cs | 9 +- .../Operations/control_flow_util.py.cs | 77 ++ .../Operations/gen_functional_ops.cs | 1066 ++++++++++++-- .../Operations/gen_list_ops.cs | 1227 +++++++++++++++++ src/TensorFlowNET.Core/Operations/list_ops.cs | 111 ++ .../Operations/tensor_array_ops.cs | 20 +- src/TensorFlowNET.Core/Operations/while_v2.cs | 401 ++++++ .../Tensors/Tensor.Creation.cs | 7 + src/TensorFlowNET.Core/Tensors/TensorArray.cs | 24 + src/TensorFlowNET.Core/Tensors/Tensors.cs | 54 +- src/TensorFlowNET.Core/ops.cs | 2 +- src/TensorFlowNET.Keras/BackendImpl.cs | 95 +- src/TensorFlowNET.Keras/Engine/Model.Build.cs | 2 +- .../Engine/Model.Evaluate.cs | 4 +- src/TensorFlowNET.Keras/Engine/Model.Fit.cs | 2 +- src/TensorFlowNET.Keras/Engine/Model.Train.cs | 2 +- .../Layers/Rnn/DropoutRNNCellMixin.cs | 11 +- src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs | 39 +- .../Layers/Rnn/RnnCellBase.cs | 24 - .../Layers/Rnn/SimpleRNNCell.cs | 7 +- .../Layers/Rnn/StackedRNNCells.cs | 152 +- src/TensorFlowNET.Keras/Utils/RnnUtils.cs | 35 +- .../ManagedAPI/ControlFlowApiTest.cs | 4 +- tools/Tensorflow.CodeGen/FunctionGenerator.cs | 24 +- tools/Tensorflow.CodeGen/Program.cs | 2 +- tools/Tensorflow.CodeGen/Utils.cs | 8 +- 53 files changed, 3662 
insertions(+), 507 deletions(-) create mode 100644 src/TensorFlowNET.Core/Common/Types/FakeTensorByTensorArray.cs rename src/TensorFlowNET.Core/Common/Types/{INest.cs => INestStructure.cs} (65%) create mode 100644 src/TensorFlowNET.Core/Framework/auto_control_deps_utils.cs create mode 100644 src/TensorFlowNET.Core/Operations/gen_list_ops.cs create mode 100644 src/TensorFlowNET.Core/Operations/list_ops.cs create mode 100644 src/TensorFlowNET.Core/Operations/while_v2.cs delete mode 100644 src/TensorFlowNET.Keras/Layers/Rnn/RnnCellBase.cs diff --git a/src/TensorFlowNET.Core/APIs/c_api.cs b/src/TensorFlowNET.Core/APIs/c_api.cs index 10f678e0a..6049c95cc 100644 --- a/src/TensorFlowNET.Core/APIs/c_api.cs +++ b/src/TensorFlowNET.Core/APIs/c_api.cs @@ -16,6 +16,7 @@ limitations under the License. using System; using System.Runtime.InteropServices; +using static Tensorflow.CppShapeInferenceResult.Types; namespace Tensorflow { @@ -50,6 +51,19 @@ public static string StringPiece(IntPtr handle) return handle == IntPtr.Zero ? String.Empty : Marshal.PtrToStringAnsi(handle); } + public unsafe static byte[] ByteStringPiece(IntPtr handle) + { + byte* str_data = (byte*)handle.ToPointer(); + List bytes = new List(); + byte current = 255; + while (current != ((byte)'\0')) + { + current = *(str_data++); + bytes.Add(current); + } + return bytes.Take(bytes.Count - 1).ToArray(); + } + [UnmanagedFunctionPointer(CallingConvention.Winapi)] public delegate void Deallocator(IntPtr data, IntPtr size, ref DeallocatorArgs args); diff --git a/src/TensorFlowNET.Core/APIs/tf.control_flow.cs b/src/TensorFlowNET.Core/APIs/tf.control_flow.cs index 239487e05..cd5a71e50 100644 --- a/src/TensorFlowNET.Core/APIs/tf.control_flow.cs +++ b/src/TensorFlowNET.Core/APIs/tf.control_flow.cs @@ -46,10 +46,10 @@ public Tensor while_loop(Func cond, Tensor loop_vars, int parallel_iterations = 10) { - Func cond1 = x + Func cond1 = x => cond(x[0]); - Func body1 = x + Func body1 = x => new[] { body(x[0]) }; var results = control_flow_ops.while_loop(cond1, @@ -58,9 +58,9 @@ public Tensor while_loop(Func cond, return results[0]; } - public Tensor[] while_loop(Func cond, - Func body, - Tensor[] loop_vars, + public Tensor[] while_loop(Func cond, + Func body, + Tensors loop_vars, int parallel_iterations = 10, string name = null) => control_flow_ops.while_loop(cond, body, loop_vars, diff --git a/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs b/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs index 6cf62e7b8..287b48cc3 100644 --- a/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs +++ b/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs @@ -18,7 +18,12 @@ public static IEnumerable SkipLast(this IEnumerable sequence, int count return sequence.Take(sequence.Count() - count); } #endif - public static Tensors ToTensors(this IEnumerable tensors) + public static Tensors ToTensors(this Tensor[] tensors) + { + return new Tensors(tensors); + } + + public static Tensors ToTensors(this IList tensors) { return new Tensors(tensors); } diff --git a/src/TensorFlowNET.Core/Common/Types/FakeTensorByTensorArray.cs b/src/TensorFlowNET.Core/Common/Types/FakeTensorByTensorArray.cs new file mode 100644 index 000000000..d0c35ee70 --- /dev/null +++ b/src/TensorFlowNET.Core/Common/Types/FakeTensorByTensorArray.cs @@ -0,0 +1,20 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Common.Types +{ + /// + /// This is a temp solution, which should be removed after refactoring `Tensors` + /// + 
[Obsolete] + public class FakeTensorByTensorArray: Tensor + { + public TensorArray TensorArray { get; set; } + + public FakeTensorByTensorArray(TensorArray array) + { + TensorArray = array; + } + } +} diff --git a/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs b/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs index c61d04b25..401903159 100644 --- a/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs +++ b/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs @@ -5,136 +5,80 @@ namespace Tensorflow.Common.Types { - public class GeneralizedTensorShape: IEnumerable, INestStructure, INestable + public class GeneralizedTensorShape: Nest { - public TensorShapeConfig[] Shapes { get; set; } - /// - /// create a single-dim generalized Tensor shape. - /// - /// - public GeneralizedTensorShape(int dim, int size = 1) - { - var elem = new TensorShapeConfig() { Items = new long?[] { dim } }; - Shapes = Enumerable.Repeat(elem, size).ToArray(); - //Shapes = new TensorShapeConfig[size]; - //Shapes.Initialize(new TensorShapeConfig() { Items = new long?[] { dim } }); - //Array.Initialize(Shapes, new TensorShapeConfig() { Items = new long?[] { dim } }); - ////Shapes = new TensorShapeConfig[] { new TensorShapeConfig() { Items = new long?[] { dim } } }; - } + ////public TensorShapeConfig[] Shapes { get; set; } + ///// + ///// create a single-dim generalized Tensor shape. + ///// + ///// + //public GeneralizedTensorShape(int dim, int size = 1) + //{ + // var elem = new TensorShapeConfig() { Items = new long?[] { dim } }; + // Shapes = Enumerable.Repeat(elem, size).ToArray(); + // //Shapes = new TensorShapeConfig[size]; + // //Shapes.Initialize(new TensorShapeConfig() { Items = new long?[] { dim } }); + // //Array.Initialize(Shapes, new TensorShapeConfig() { Items = new long?[] { dim } }); + // ////Shapes = new TensorShapeConfig[] { new TensorShapeConfig() { Items = new long?[] { dim } } }; + //} - public GeneralizedTensorShape(Shape shape) + public GeneralizedTensorShape(Shape value, string? name = null) { - Shapes = new TensorShapeConfig[] { shape }; + NodeValue = value; + NestType = NestType.Node; } - public GeneralizedTensorShape(TensorShapeConfig shape) + public GeneralizedTensorShape(IEnumerable values, string? name = null) { - Shapes = new TensorShapeConfig[] { shape }; + ListValue = values.Select(s => new Nest(s) as INestStructure).ToList(); + Name = name; + NestType = NestType.List; } - public GeneralizedTensorShape(TensorShapeConfig[] shapes) + public GeneralizedTensorShape(Dictionary value, string? name = null) { - Shapes = shapes; + DictValue = value.ToDictionary(x => x.Key, x => new Nest(x.Value) as INestStructure); + Name = name; + NestType = NestType.Dictionary; } - public GeneralizedTensorShape(IEnumerable shape) + public GeneralizedTensorShape(Nest other) { - Shapes = shape.Select(x => (TensorShapeConfig)x).ToArray(); + NestType = other.NestType; + NodeValue = other.NodeValue; + DictValue = other.DictValue; + ListValue = other.ListValue; + Name = other.Name; } public Shape ToSingleShape() { - if (Shapes.Length != 1) + var shapes = Flatten().ToList(); + if (shapes.Count != 1) { throw new ValueError("The generalized shape contains more than 1 dim."); } - var shape_config = Shapes[0]; - Debug.Assert(shape_config is not null); - return new Shape(shape_config.Items.Select(x => x is null ? 
-1 : x.Value).ToArray()); + return shapes[0]; } public long ToNumber() { - if(Shapes.Length != 1 || Shapes[0].Items.Length != 1) + var shapes = Flatten().ToList(); + if (shapes.Count != 1 || shapes[0].ndim != 1) { throw new ValueError("The generalized shape contains more than 1 dim."); } - var res = Shapes[0].Items[0]; - return res is null ? -1 : res.Value; - } - - public Shape[] ToShapeArray() - { - return Shapes.Select(x => new Shape(x.Items.Select(y => y is null ? -1 : y.Value).ToArray())).ToArray(); - } - - public IEnumerable Flatten() - { - List result = new List(); - foreach(var shapeConfig in Shapes) - { - result.AddRange(shapeConfig.Items); - } - return result; - } - public INestStructure MapStructure(Func func) - { - List> lists = new(); - foreach(var shapeConfig in Shapes) - { - lists.Add(new Nest(shapeConfig.Items.Select(x => new Nest(func(x))))); - } - return new Nest(lists); - } - - public Nest AsNest() - { - Nest DealWithSingleShape(TensorShapeConfig config) - { - if (config.Items.Length == 0) - { - return Nest.Empty; - } - else if (config.Items.Length == 1) - { - return new Nest(config.Items[0]); - } - else - { - return new Nest(config.Items.Select(x => new Nest(x))); - } - } - - if(Shapes.Length == 0) - { - return Nest.Empty; - } - else if(Shapes.Length == 1) - { - return DealWithSingleShape(Shapes[0]); - } - else - { - return new Nest(Shapes.Select(s => DealWithSingleShape(s))); - } + return shapes[0].dims[0]; } - - - public static implicit operator GeneralizedTensorShape(int dims) - => new GeneralizedTensorShape(dims); - - public IEnumerator GetEnumerator() + public INestStructure ToTensorShapeConfigs() { - foreach (var shape in Shapes) - { - yield return shape.Items; - } + return MapStructure(s => new TensorShapeConfig() { Items = s.dims.Select(x => x == -1 ? null : x).ToArray() }); } - IEnumerator IEnumerable.GetEnumerator() + public static implicit operator GeneralizedTensorShape(Shape shape) { - return GetEnumerator(); + return new GeneralizedTensorShape(shape); } } } diff --git a/src/TensorFlowNET.Core/Common/Types/INest.cs b/src/TensorFlowNET.Core/Common/Types/INestStructure.cs similarity index 65% rename from src/TensorFlowNET.Core/Common/Types/INest.cs rename to src/TensorFlowNET.Core/Common/Types/INestStructure.cs index 001141ddc..32b662937 100644 --- a/src/TensorFlowNET.Core/Common/Types/INest.cs +++ b/src/TensorFlowNET.Core/Common/Types/INestStructure.cs @@ -10,6 +10,19 @@ namespace Tensorflow.Common.Types /// public interface INestStructure: INestable { + NestType NestType { get; } + + /// + /// The item count of depth 1 of the nested structure. + /// For example, [1, 2, [3, 4, 5]] has ShallowNestedCount = 3. + /// + int ShallowNestedCount { get; } + /// + /// The total item count of depth 1 of the nested structure. + /// For example, [1, 2, [3, 4, 5]] has TotalNestedCount = 5. + /// + int TotalNestedCount { get; } + /// /// Flatten the Nestable object. Node that if the object contains only one value, /// it will be flattened to an enumerable with one element. 
diff --git a/src/TensorFlowNET.Core/Common/Types/Nest.Static.cs b/src/TensorFlowNET.Core/Common/Types/Nest.Static.cs index b67d11f42..dc7fd3a1f 100644 --- a/src/TensorFlowNET.Core/Common/Types/Nest.Static.cs +++ b/src/TensorFlowNET.Core/Common/Types/Nest.Static.cs @@ -13,7 +13,7 @@ public static class Nest /// /// /// - public static Nest PackSequenceAs(INestable template, T[] flatItems) + public static Nest PackSequenceAs(INestable template, TOut[] flatItems) { return template.AsNest().PackSequence(flatItems); } diff --git a/src/TensorFlowNET.Core/Common/Types/Nest.cs b/src/TensorFlowNET.Core/Common/Types/Nest.cs index 84a60402e..4de7d1fa5 100644 --- a/src/TensorFlowNET.Core/Common/Types/Nest.cs +++ b/src/TensorFlowNET.Core/Common/Types/Nest.cs @@ -28,27 +28,58 @@ public class Nest : INestStructure, IEnumerable public static Nest Empty => _empty; public NestType NestType { get; protected set; } public string? Name { get; set; } - public T? Value { get; protected set; } - public List>? ListValue { get; protected set; } - public Dictionary>? DictValue { get; protected set; } + public T? NodeValue { get; protected set; } + public List>? ListValue { get; protected set; } + public Dictionary>? DictValue { get; protected set; } + + public int ShallowNestedCount + { + get + { + if (NestType == NestType.Empty) + { + return 0; + } + else if (NestType == NestType.Node) + { + return 1; + } + else if (NestType == NestType.List) + { + return ListValue!.Count; + } + else // dict + { + return DictValue!.Count; + } + } + } + + public int TotalNestedCount + { + get + { + return Flatten().Count(); + } + } protected Nest() { } public Nest(T value, string? name = null) { - Value = value; + NodeValue = value; Name = name; NestType = NestType.Node; } - public Nest(IEnumerable> values, string? name = null) + public Nest(IEnumerable> values, string? name = null) { ListValue = values.ToList(); Name = name; NestType = NestType.List; } - public Nest(Dictionary> value, string? name = null) + public Nest(Dictionary> value, string? name = null) { DictValue = value; Name = name; @@ -58,7 +89,7 @@ public Nest(Dictionary> value, string? 
name = null) public Nest(Nest other) { NestType = other.NestType; - Value = other.Value; + NodeValue = other.NodeValue; DictValue = other.DictValue; ListValue = other.ListValue; Name = other.Name; @@ -78,17 +109,17 @@ public virtual INestStructure MapStructure(Func func) /// /// /// - public virtual Nest PackSequence(T[] flatItems) + public virtual Nest PackSequence(TOut[] flatItems) { if(flatItems.Length == 0) { - return Nest.Empty; + return Nest.Empty; } int index = 0; return PackSequenceInternal(this, flatItems, ref index); } - private static Nest PackSequenceInternal(Nest template, T[] flatItems, ref int index) + private static Nest PackSequenceInternal(Nest template, TOut[] flatItems, ref int index) { if(template.NestType == NestType.Node) { @@ -96,25 +127,25 @@ private static Nest PackSequenceInternal(Nest template, T[] flatItems, ref { throw new InvalidArgumentError("The template and flat items are not matched."); } - return new Nest(flatItems[index++]); + return new Nest(flatItems[index++]); } else if(template.NestType == NestType.List) { - List> nestedObjects = new List>(); + List> nestedObjects = new List>(); for (int i = 0; i < template.ListValue!.Count; i++) { - nestedObjects.Add(PackSequenceInternal(template.ListValue![i], flatItems, ref index)); + nestedObjects.Add(PackSequenceInternal(template.ListValue![i].AsNest(), flatItems, ref index)); } - return new Nest(nestedObjects); + return new Nest(nestedObjects); } else if(template.NestType == NestType.Node) { - Dictionary> dict = new Dictionary>(); + Dictionary> dict = new Dictionary>(); foreach(var (key, value) in template.DictValue!) { - dict[key] = PackSequenceInternal(value, flatItems, ref index); + dict[key] = PackSequenceInternal(value.AsNest(), flatItems, ref index); } - return new Nest(dict); + return new Nest(dict); } // Consider Empty as invalid type. throw new InvalidArgumentError("When using `PackSequenceAs`, the template cannot contain empty node."); @@ -223,10 +254,10 @@ public T this[int index] public static Nest ReduceFrom(INestStructure input) where TOut: INestStructure { var nested = input.AsNest(); - return ReduceInternal(nested); + return ReduceInternal(nested).AsNest(); } - private static Nest ReduceInternal(Nest node) where TOut : INestStructure + private static INestStructure ReduceInternal(Nest node) where TOut : INestStructure { if(node.NestType == NestType.Empty) { @@ -234,15 +265,15 @@ private static Nest ReduceInternal(Nest node) where TOut : INestS } else if(node.NestType == NestType.Node) { - return node.Value!.AsNest(); + return node.NodeValue!.AsNest(); } else if(node.NestType == NestType.List) { - return new Nest(node.ListValue!.Select(x => ReduceInternal(x))); + return new Nest(node.ListValue!.Select(x => ReduceInternal(x.AsNest()))); } else // Dictionary type { - return new Nest(node.DictValue!.ToDictionary(x => x.Key, x => ReduceInternal(x.Value))); + return new Nest(node.DictValue!.ToDictionary(x => x.Key, x => ReduceInternal(x.Value.AsNest()))); } } @@ -252,7 +283,7 @@ private static bool FindInternal(Nest node, int index, out T? result) { if(index == 0) { - result = node.Value!; + result = node.NodeValue!; return true; } result = default(T); @@ -264,7 +295,7 @@ private static bool FindInternal(Nest node, int index, out T? result) { if(index == 0) { - return FindInternal(item, index, out result); + return FindInternal(item.AsNest(), index, out result); } index--; } @@ -277,7 +308,7 @@ private static bool FindInternal(Nest node, int index, out T? 
result) { if (index == 0) { - return FindInternal(item, index, out result); + return FindInternal(item.AsNest(), index, out result); } index--; } @@ -297,7 +328,7 @@ private static bool SetInternal(Nest node, int index, T newValue) { if (index == 0) { - node.Value = newValue; + node.NodeValue = newValue; return true; } return false; @@ -308,7 +339,7 @@ private static bool SetInternal(Nest node, int index, T newValue) { if (index == 0) { - return SetInternal(item, index, newValue); + return SetInternal(item.AsNest(), index, newValue); } index--; } @@ -320,7 +351,7 @@ private static bool SetInternal(Nest node, int index, T newValue) { if (index == 0) { - return SetInternal(item, index, newValue); + return SetInternal(item.AsNest(), index, newValue); } index--; } @@ -336,13 +367,13 @@ private static IEnumerable FlattenInternal(Nest node) { if (node.NestType == NestType.Node) { - yield return node.Value!; + yield return node.NodeValue!; } else if (node.NestType == NestType.List) { foreach (var item in node.ListValue!) { - foreach(var val in FlattenInternal(item)) + foreach(var val in FlattenInternal(item.AsNest())) { yield return val; } @@ -352,7 +383,7 @@ private static IEnumerable FlattenInternal(Nest node) { foreach (var item in node.DictValue!.Values) { - foreach (var val in FlattenInternal(item)) + foreach (var val in FlattenInternal(item.AsNest())) { yield return val; } @@ -364,23 +395,23 @@ private Nest MapStructureInternal(Func func) { if (NestType == NestType.Node) { - return new Nest(func(Value!)); + return new Nest(func(NodeValue!)); } else if (NestType == NestType.List) { List> outs = new List>(); foreach (var item in ListValue!) { - outs.Add(item.MapStructureInternal(func)); + outs.Add(item.AsNest().MapStructureInternal(func)); } return new Nest(outs); } else if (NestType == NestType.Dictionary) { - Dictionary> outs = new Dictionary>(); + Dictionary> outs = new Dictionary>(); foreach (var (key, value) in DictValue!) { - outs.Add(key, value.MapStructureInternal(func)); + outs.Add(key, value.AsNest().MapStructureInternal(func)); } return new Nest(outs); } @@ -417,14 +448,14 @@ private static void WriteString(Nest node, StringBuilder sb) } if (node.NestType == NestType.Node) { - sb.Append(node.Value!.ToString()); + sb.Append(node.NodeValue!.ToString()); } else if (node.NestType == NestType.List) { sb.Append("["); for(int i = 0; i < node.ListValue!.Count; i++) { - WriteString(node.ListValue![i], sb); + WriteString(node.ListValue![i].AsNest(), sb); if(i != node.ListValue!.Count - 1) { sb.Append(", "); @@ -440,7 +471,7 @@ private static void WriteString(Nest node, StringBuilder sb) foreach (var (key, value) in node.DictValue!) 
{ sb.Append($"{key}: "); - WriteString(value, sb); + WriteString(value.AsNest(), sb); if (i != count - 1) { sb.Append(", "); @@ -454,5 +485,15 @@ private static void WriteString(Nest node, StringBuilder sb) sb.Append(""); } } + + public static implicit operator Nest((INestStructure, INestStructure) inputs) + { + return new Nest(new INestStructure[] { inputs.Item1, inputs.Item2 }); + } + + public static implicit operator Nest((INestStructure, INestStructure, INestStructure) inputs) + { + return new Nest(new INestStructure[] { inputs.Item1, inputs.Item2, inputs.Item3 }); + } } } diff --git a/src/TensorFlowNET.Core/Common/Types/NestDictionary.cs b/src/TensorFlowNET.Core/Common/Types/NestDictionary.cs index 554ca526d..cf1994554 100644 --- a/src/TensorFlowNET.Core/Common/Types/NestDictionary.cs +++ b/src/TensorFlowNET.Core/Common/Types/NestDictionary.cs @@ -6,7 +6,11 @@ namespace Tensorflow.Common.Types { public class NestDictionary : INestStructure, IDictionary where TKey : notnull { + public NestType NestType => NestType.Dictionary; public IDictionary Value { get; set; } + public int ShallowNestedCount => Values.Count; + + public int TotalNestedCount => Values.Count; public NestDictionary(IDictionary dict) { Value = dict; diff --git a/src/TensorFlowNET.Core/Common/Types/NestList.cs b/src/TensorFlowNET.Core/Common/Types/NestList.cs index 082187188..e38675da4 100644 --- a/src/TensorFlowNET.Core/Common/Types/NestList.cs +++ b/src/TensorFlowNET.Core/Common/Types/NestList.cs @@ -10,29 +10,34 @@ namespace Tensorflow.Common.Types /// public sealed class NestList : INestStructure, IEnumerable { - public List Value { get; set; } + public NestType NestType => NestType.List; + public List Values { get; set; } + public int ShallowNestedCount => Values.Count; + + public int TotalNestedCount => Values.Count; + public NestList(IEnumerable values) { - Value = new List(values); + Values = new List(values); } public IEnumerable Flatten() { - return Value; + return Values; } public INestStructure MapStructure(Func func) { - return new NestList(Value.Select(x => func(x))); + return new NestList(Values.Select(x => func(x))); } public Nest AsNest() { - return new Nest(Value.Select(x => new Nest(x))); + return new Nest(Values.Select(x => new Nest(x))); } // Enumerator implementation public IEnumerator GetEnumerator() { - return Value.GetEnumerator(); + return Values.GetEnumerator(); } IEnumerator IEnumerable.GetEnumerator() diff --git a/src/TensorFlowNET.Core/Common/Types/NestNode.cs b/src/TensorFlowNET.Core/Common/Types/NestNode.cs index 1dad421d9..701aade9a 100644 --- a/src/TensorFlowNET.Core/Common/Types/NestNode.cs +++ b/src/TensorFlowNET.Core/Common/Types/NestNode.cs @@ -10,7 +10,11 @@ namespace Tensorflow.Common.Types /// public class NestNode : INestStructure { + public NestType NestType => NestType.Node; public T Value { get; set; } + public int ShallowNestedCount => 1; + + public int TotalNestedCount => 1; public NestNode(T value) { Value = value; diff --git a/src/TensorFlowNET.Core/Data/DatasetV2.cs b/src/TensorFlowNET.Core/Data/DatasetV2.cs index 324d7e834..c1762d670 100644 --- a/src/TensorFlowNET.Core/Data/DatasetV2.cs +++ b/src/TensorFlowNET.Core/Data/DatasetV2.cs @@ -161,8 +161,8 @@ public override string ToString() break; } - yield return (new Tensors(results.Take(FirstInputTensorCount)), results.Length == FirstInputTensorCount ? 
- null : new Tensors(results.Skip(FirstInputTensorCount))); + yield return (new Tensors(results.Take(FirstInputTensorCount).ToArray()), results.Length == FirstInputTensorCount ? + null : new Tensors(results.Skip(FirstInputTensorCount).ToArray())); } } diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs index f1a09ed7b..5f156fd9b 100644 --- a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs +++ b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs @@ -359,6 +359,8 @@ bool SetOpAttrScalar(Context ctx, SafeEagerOpHandle op, case TF_AttrType.TF_ATTR_FUNC: if (value is ConcreteFunction func) c_api.TFE_OpSetAttrFunctionName(op, key, func.func_graph.FuncName, func.func_graph.FuncName.Length); + else if(value is string str) + c_api.TFE_OpSetAttrFunctionName(op, key, str, str.Length); else throw new NotImplementedException("TF_AttrType.TF_ATTR_FUNC"); break; diff --git a/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs b/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs index 083d4813a..ac099ae2b 100644 --- a/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs +++ b/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs @@ -1,4 +1,5 @@ using System.Linq; +using Tensorflow.Eager; namespace Tensorflow.Framework.Models { @@ -24,5 +25,17 @@ public TensorSpec _batch(int dim = -1) shapes.Insert(0, dim); return new TensorSpec(shapes.ToArray(), _dtype); } + + public static TensorSpec FromTensor(Tensor tensor, string? name = null) + { + if(tensor is EagerTensor) + { + return new TensorSpec(tensor.shape, tensor.dtype, name); + } + else + { + return new TensorSpec(tensor.shape, tensor.dtype, name ?? tensor.name); + } + } } } diff --git a/src/TensorFlowNET.Core/Framework/auto_control_deps_utils.cs b/src/TensorFlowNET.Core/Framework/auto_control_deps_utils.cs new file mode 100644 index 000000000..28d9e5008 --- /dev/null +++ b/src/TensorFlowNET.Core/Framework/auto_control_deps_utils.cs @@ -0,0 +1,89 @@ +using Tensorflow.Graphs; + +namespace Tensorflow.Framework +{ + internal static class auto_control_deps_utils + { + public static readonly string READ_ONLY_RESOURCE_INPUTS_ATTR = "_read_only_resource_inputs"; + public static List get_read_only_resource_input_indices_graph(FuncGraph func_graph) + { + List result = new List(); + // A cache to store the read only resource inputs of an Op. + // Operation -> ObjectIdentitySet of resource handles. 
+ Dictionary> opReadOnlyResourceInputs = + new Dictionary>(); + + for (int inputIndex = 0; inputIndex < func_graph.Inputs.Length; inputIndex++) + { + Tensor t = func_graph.Inputs[inputIndex]; + if (t.dtype != dtypes.resource) + continue; + + bool readOnly = true; + foreach (var op in t.consumers()) + { + if (opReadOnlyResourceInputs.ContainsKey(op)) + { + if (!opReadOnlyResourceInputs[op].Contains(t)) + { + readOnly = false; + break; + } + } + else + { + List indices = _get_read_only_resource_input_indices_op(op); + opReadOnlyResourceInputs[op] = new HashSet( + indices.Select(i => op.inputs[i])); + if (!opReadOnlyResourceInputs[op].Contains(t)) + { + readOnly = false; + break; + } + } + } + + if (readOnly) + result.Add(inputIndex); + } + + return result; + } + + private static List _get_read_only_resource_input_indices_op(Operation op) + { + // ignore the RESOURCE_READ_OPS + + int[] read_only_input_indices; + + try + { + read_only_input_indices = op.get_attr(READ_ONLY_RESOURCE_INPUTS_ATTR); + } + catch (InvalidArgumentError) + { + return new List(); + } + + int read_only_index = 0; + List result = new(); + for (int i = 0; i < op.inputs.Length; i++) + { + if (read_only_index >= read_only_input_indices.Length) + { + break; + } + if (op.inputs[i].dtype != dtypes.resource) + { + continue; + } + if (read_only_index < read_only_input_indices.Length && i == read_only_input_indices[read_only_index]) + { + result.Add(i); + read_only_index++; + } + } + return result; + } + } +} diff --git a/src/TensorFlowNET.Core/Framework/function_def_lib.cs b/src/TensorFlowNET.Core/Framework/function_def_lib.cs index 67f8d324e..488c6b654 100644 --- a/src/TensorFlowNET.Core/Framework/function_def_lib.cs +++ b/src/TensorFlowNET.Core/Framework/function_def_lib.cs @@ -42,10 +42,10 @@ public static FuncGraph function_def_to_graph(FunctionDef fdef, object? 
structur func_graph.as_default(); importer.import_graph_def(graph_def, name: "", validate_colocation_constraints: false); var input_tensor_names = fdef.Signature.InputArg.Select(x => nested_to_flat_tensor_name[x.Name]); - func_graph.Inputs = new Tensors(input_tensor_names.Select(x => func_graph.get_tensor_by_name(x))); + func_graph.Inputs = new Tensors(input_tensor_names.Select(x => func_graph.get_tensor_by_name(x)).ToArray()); var output_tensor_names = fdef.Signature.OutputArg.Select(x => nested_to_flat_tensor_name[fdef.Ret[x.Name]]); - func_graph.Outputs = new Tensors(output_tensor_names.Select(x => func_graph.get_tensor_by_name(x))); + func_graph.Outputs = new Tensors(output_tensor_names.Select(x => func_graph.get_tensor_by_name(x)).ToArray()); // TODO(Rinne): func_graph.ControlOutputs _set_handle_data(func_graph, fdef); diff --git a/src/TensorFlowNET.Core/Functions/ConcreteFunction.cs b/src/TensorFlowNET.Core/Functions/ConcreteFunction.cs index 88dce7d98..8742e4535 100644 --- a/src/TensorFlowNET.Core/Functions/ConcreteFunction.cs +++ b/src/TensorFlowNET.Core/Functions/ConcreteFunction.cs @@ -8,6 +8,7 @@ using Tensorflow.Graphs; using Tensorflow.Train; using Tensorflow.Util; +using Tensorflow.Common.Extensions; using static Tensorflow.Binding; namespace Tensorflow.Functions @@ -40,6 +41,18 @@ public class ConcreteFunction: Trackable public Tensor[] FlatStructuredOutputs => func_graph.FlatStructuredOutputs; public IEnumerable Variables => func_graph.Variables; public IEnumerable TrainableVariables => func_graph.TrainableVariables; + internal NameAttrList AsNameAttrList + { + get + { + NameAttrList ret = new() { Name = this.Name }; + foreach (var (name, value) in _attrs) + { + ret.Attr[name] = value; + } + return ret; + } + } public ConcreteFunction(string name) { diff --git a/src/TensorFlowNET.Core/Graphs/FuncGraph.cs b/src/TensorFlowNET.Core/Graphs/FuncGraph.cs index 3bce52ea5..ba7d7068e 100644 --- a/src/TensorFlowNET.Core/Graphs/FuncGraph.cs +++ b/src/TensorFlowNET.Core/Graphs/FuncGraph.cs @@ -81,7 +81,7 @@ internal set public IEnumerable TrainableVariables => Variables.Where(v => v.Trainable); public Dictionary Attrs { get; set; } - Dictionary _captures + internal Dictionary _captures = new Dictionary(); public Tensor[] external_captures @@ -399,7 +399,7 @@ public static FuncGraph func_graph_from_func(string name, Func x is Tensor).Select(x => (Tensor)x)); + .Where(x => x is Tensor).Select(x => (Tensor)x).ToArray()); //var func_args_before = nest.pack_sequence_as(func_args, flat_func_args, true); //var func_kwargs_before = nest.pack_sequence_as(func_kwargs, flat_func_kwargs, true); diff --git a/src/TensorFlowNET.Core/Graphs/Graph.cs b/src/TensorFlowNET.Core/Graphs/Graph.cs index eb8df5812..9e879a0f0 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.cs @@ -129,7 +129,7 @@ public int seed } } - protected Graph outer_graph; + internal Graph outer_graph; public Graph OuterGraph => outer_graph; public Dictionary Functions => _functions; public SafeGraphHandle c_graph => _handle; diff --git a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs index d12ed1ad6..8614391a6 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs @@ -7,13 +7,19 @@ namespace Tensorflow.Keras.Layers.Rnn { public interface IRnnCell: ILayer { - GeneralizedTensorShape StateSize { get; } - GeneralizedTensorShape OutputSize { get; } - bool IsTFRnnCell { 
get; } + /// + /// If the derived class tends to not implement it, please return null. + /// + GeneralizedTensorShape? StateSize { get; } + /// + /// If the derived class tends to not implement it, please return null. + /// + GeneralizedTensorShape? OutputSize { get; } /// /// Whether the optional RNN args are supported when appying the layer. /// In other words, whether `Apply` is overwrited with process of `RnnOptionalArgs`. /// bool SupportOptionalArgs { get; } + Tensors GetInitialState(Tensors inputs, Tensor batch_size, TF_DataType dtype); } } diff --git a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs index 26646b76a..b651089a5 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs @@ -181,6 +181,10 @@ public void adapt(Tensor data, int? batch_size = null, int? steps = null) { throw new NotImplementedException(); } + public Tensors GetInitialState(Tensors inputs = null, Tensor batch_size = null, TF_DataType dtype = TF_DataType.DtInvalid) + { + throw new NotImplementedException(); + } public GeneralizedTensorShape StateSize => throw new NotImplementedException(); public GeneralizedTensorShape OutputSize => throw new NotImplementedException(); public bool IsTFRnnCell => throw new NotImplementedException(); diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs index 76a222ba3..5ff5ccffc 100644 --- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs +++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs @@ -15,9 +15,11 @@ limitations under the License. ******************************************************************************/ using Google.Protobuf; +using Google.Protobuf.Collections; using System; using System.Collections.Generic; using System.Linq; +using Tensorflow.Functions; using static Tensorflow.Binding; using static Tensorflow.OpDef.Types; @@ -420,6 +422,12 @@ private AttrValue SetAttrValue(OpDef op_def, AttrDef attr_def, object value) case "list(shape)": attr_value.List.Shape.AddRange((value as Shape[]).Select(x => _MakeShape(x, attr_def))); break; + case "func": + attr_value.Func = _MakeFunc(value, attr_def.Name); + break; + case "list(func)": + attr_value.List.Func.AddRange(_MakeFuncList(value, attr_def.Name)); + break; default: throw new TypeError($"SetAttrValue: can't not convert attr_def.Type '{attr_def.Type}' to protos."); } @@ -427,6 +435,47 @@ private AttrValue SetAttrValue(OpDef op_def, AttrDef attr_def, object value) return attr_value; } + private NameAttrList _MakeFunc(object func, string arg_name) + { + if(func is NameAttrList attrList) + { + return attrList; + } + NameAttrList fn_attr; + if(func is string funcStr) + { + fn_attr = new NameAttrList() { Name = funcStr }; + } + else if(func is ConcreteFunction concrete) + { + concrete.AddTograph(ops.get_default_graph()); + fn_attr = concrete.AsNameAttrList; + } + else if(func is EagerDefinedFunction eager) + { + eager.AddToGraph(ops.get_default_graph()); + fn_attr = new NameAttrList() { Name = eager.Name }; + } + else + { + throw new TypeError($"Don't know how to convert {func} to a func for argument {arg_name}"); + } + return fn_attr; + } + + private List _MakeFuncList(object funcList, string arg_name) + { + List res = new List(); + if(funcList is IEnumerable enumerable) + { + foreach(var func in enumerable) + { + res.Add(_MakeFunc(func, arg_name)); + } + } + return res; + } + private bool _IsListParameter(ArgDef arg) { if 
(!String.IsNullOrEmpty(arg.NumberAttr)) diff --git a/src/TensorFlowNET.Core/Operations/Operation.Output.cs b/src/TensorFlowNET.Core/Operations/Operation.Output.cs index 2955a13fa..2329a4786 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Output.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Output.cs @@ -34,7 +34,7 @@ public int OutputListLength(string name) return num; } - protected Tensor[] _outputs; + internal Tensor[] _outputs; public virtual Tensor[] outputs => _outputs; public Tensor output => _outputs.FirstOrDefault(); diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index a789c5f4b..5e689c655 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -46,9 +46,9 @@ namespace Tensorflow /// public partial class Operation : ITensorOrOperation { - private readonly IntPtr _handle; // _c_op in python + protected IntPtr _handle; // _c_op in python - private readonly Graph _graph; + protected Graph _graph; internal Func _gradient_function; @@ -69,6 +69,7 @@ public partial class Operation : ITensorOrOperation //private OperationDescription _op_desc; public NodeDef node_def => GetNodeDef(); + protected Operation() { } public Operation(IntPtr handle, Graph g = null) { diff --git a/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs b/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs index 08e73fe67..591760600 100644 --- a/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs +++ b/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs @@ -17,6 +17,7 @@ limitations under the License. using System; using System.Collections.Generic; using System.Linq; +using Tensorflow.Common.Types; using Tensorflow.Eager; using Tensorflow.Framework; using static Tensorflow.Binding; @@ -38,10 +39,6 @@ public class _EagerTensorArray : TensorArray bool _infer_shape; public override bool infer_shape => _infer_shape; - public bool _dynamic_size; - public Shape _element_shape; - - public List _colocate_with; Tensor _handle; public override Tensor handle => _handle; @@ -56,6 +53,7 @@ public _EagerTensorArray(TF_DataType dtype, Tensor size, bool dynamic_size = fal bool infer_shape = true, Shape? element_shape = null, bool colocate_with_first_write_call = true, string name = null) { + _size = size; _flow = constant_op.constant(0); _infer_shape = infer_shape; _element_shape = element_shape ?? Shape.Null; diff --git a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs index dde2624af..4c3fde316 100644 --- a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs +++ b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs @@ -16,7 +16,9 @@ limitations under the License. using System; using System.Collections.Generic; +using System.Diagnostics; using System.Linq; +using Tensorflow.Common.Types; using Tensorflow.Eager; using static Tensorflow.Binding; @@ -33,18 +35,18 @@ public class _GraphTensorArray : TensorArray /// first tensor written to it. 
/// bool _colocate_with_first_write_call; - public bool colocate_with_first_write_call => _colocate_with_first_write_call; + public override bool colocate_with_first_write_call => _colocate_with_first_write_call; bool _infer_shape; - public bool infer_shape => _infer_shape; - public bool _dynamic_size; + public override bool infer_shape => _infer_shape; public List _element_shape; public List _colocate_with; internal Tensor _handle; - public Tensor handle => _handle; + public override Tensor handle => _handle; internal Tensor _flow; + public override Tensor flow => _flow; public _GraphTensorArray(TF_DataType dtype, Tensor size, bool? dynamic_size = null, bool? clear_after_read = null, string tensor_array_name = null, Tensor handle = null, Tensor flow = null, @@ -55,6 +57,7 @@ public _GraphTensorArray(TF_DataType dtype, Tensor size, bool? dynamic_size = nu dynamic_size = dynamic_size ?? false; _dynamic_size = dynamic_size.Value; _dtype = dtype; + _size = size; _colocate_with_first_write_call = colocate_with_first_write_call; if (colocate_with_first_write_call) @@ -235,4 +238,172 @@ public override Tensor gather(Tensor indices, string name = null) return value; } } + + public class _GraphTensorArrayV2 : TensorArray + { + internal TF_DataType _dtype; + public override TF_DataType dtype => _dtype; + + /// + /// Used to keep track of what tensors the TensorArray should be + /// colocated with. We choose to colocate the TensorArray with the + /// first tensor written to it. + /// + bool _colocate_with_first_write_call; + public override bool colocate_with_first_write_call => _colocate_with_first_write_call; + + bool _infer_shape; + public override bool infer_shape => _infer_shape; + public Shape _element_shape; + + public List _colocate_with; + + internal Tensor _handle; + public override Tensor handle => _handle; + internal Tensor _flow; + public override Tensor flow => _flow; + + public _GraphTensorArrayV2(TF_DataType dtype, Tensor size, bool? dynamic_size = null, + bool? clear_after_read = null, string tensor_array_name = null, Tensor handle = null, Tensor flow = null, + bool infer_shape = true, Shape? element_shape = null, + bool colocate_with_first_write_call = true, string name = null) + { + Debug.Assert(handle is null); + dynamic_size = dynamic_size ?? 
false; + _dynamic_size = dynamic_size.Value; + _size = size; + + if(flow is not null && flow.dtype != dtypes.variant) + { + throw new TypeError($"Expected `flow` to be a variant tensor, but received `{flow.dtype}` instead"); + } + if(flow is null && size is null) + { + throw new ValueError("Argument `size` must be provided if argument `flow` is not provided."); + } + if(flow is not null && size is not null) + { + throw new ValueError("Cannot provide both `flow` and `size` arguments at the same time."); + } + if(flow is not null && element_shape is not null) + { + throw new ValueError("Cannot provide both `flow` and `element_shape` arguments at the same time."); + } + + _dtype = dtype; + + _element_shape = element_shape; + _infer_shape = infer_shape; + tf_with(ops.name_scope(name, "TensorArrayV2", new object[] { size, flow }), scope => + { + if (flow is null) + { + _flow = list_ops.tensor_list_reserve(element_shape, size, dtype, scope.scope_name); + } + else + { + _flow = flow; + } + }); + + _colocate_with_first_write_call = false; + _colocate_with = null; + } + + public override TensorArray unstack(Tensor value, string name = null) + { + return tf_with(ops.name_scope(name, "TensorArrayUnstack", new { _flow, value }), delegate + { + value = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value"); + Debug.Assert(value.dtype == _dtype); + var flow_out = list_ops.tensor_list_from_tensor(value, value.shape.dims.Skip(1).ToArray()); + return tensor_array_ops.build_ta_with_new_flow(this, flow_out); + }); + } + + public TensorArray scatter(Tensor indices, Tensor value, string name = null) + { + return tf_with(ops.name_scope(name, "TensorArrayScatter", new { _flow, value, indices }), delegate + { + value = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value"); + Debug.Assert(value.dtype == _dtype); + var flow_out = list_ops.tensor_list_scatter(value, indices, _element_shape, _flow); + return tensor_array_ops.build_ta_with_new_flow(this, flow_out); + }); + } + + public override Tensor read(T index, string name = null) + { + if(index is Tensor tensor) + { + return read(tensor, name); + } + else + { + throw new TypeError("Please use non-generic method instead."); + } + } + + public Tensor read(Tensor index, string name = null) + { + return tf_with(tf.name_scope(name, "TensorArrayV2Read", new object[] { _flow, index }), scope => + { + return list_ops.tensor_list_get_item(_flow, index, _dtype, _element_shape, name); + }); + } + + public override TensorArray write(Tensor index, Tensor value, string name = null) + { + return tf_with(ops.name_scope(name, "TensorArrayV2Write", new { _flow, index, value }), delegate + { + value = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value"); + Debug.Assert(value.dtype == _dtype); + var flow_out = list_ops.tensor_list_set_item(_flow, index, value, _dynamic_size, name); + + return tensor_array_ops.build_ta_with_new_flow(this, flow_out); + }); + } + + public override TensorArray write(int index, T value, string name = null) + { + var value_tensor = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value"); + var index_tensor = ops.convert_to_tensor(index, name: "index"); + return write(index_tensor, value_tensor); + } + + private Tensor size(string name = null) + { + if(!_dynamic_size && _size is not null) + { + return ops.convert_to_tensor(_size, dtypes.int32); + } + else + { + return gen_list_ops.tensor_list_length(_flow, name); + } + } + + public override Tensor stack(string name = null) + { + return 
tf_with(ops.name_scope(name, "TensorArrayV2Stack", _flow), delegate + { + int ta_size; + if(!_dynamic_size && (_size is not null)) + { + ta_size = (int)tensor_util.constant_value(_size); + } + else + { + ta_size = -1; + } + var value = list_ops.tensor_list_stack(_flow, _dtype, ta_size, _element_shape); + return value; + }); + } + + public override Tensor gather(Tensor indices, string name = null) + { + return list_ops.tensor_list_gather(_flow, indices, _dtype, _element_shape, name); + } + } } diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index a0b47aace..ca9e5fae2 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -119,6 +119,27 @@ public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT } } + public static Tensor zeros(Tensors shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) + { + dtype = dtype.as_base_dtype(); + Tensor shapeTensor; + if(shape.Length > 1) + { + shapeTensor = ops.convert_to_tensor(shape, dtypes.int32); + if(shapeTensor.ndim > 1) + { + shapeTensor = array_ops.reshape(shapeTensor, new Shape(-1)); + } + } + else + { + shapeTensor = shape[0]; + } + var output = fill(shapeTensor, array_ops.constant(0, dtype), name); + Debug.Assert(output.dtype.as_base_dtype() == dtype); + return output; + } + public static Tensor boolean_mask(T1 tensor, T2 mask, string name = "boolean_mask", int axis = 0) { return tf_with(ops.name_scope(name, values: new { tensor, mask }), delegate @@ -307,6 +328,9 @@ public static Tensor expand_dims(Tensor input, int axis = -1, string name = null public static Tensor fill(Shape dims, T value, string name = null) => gen_array_ops.fill(dims, ops.convert_to_tensor(value), name: name); + public static Tensor fill(Tensor dims, T value, string name = null) + => gen_array_ops.fill(dims, ops.convert_to_tensor(value), name: name); + /// /// Returns the rank of a tensor. /// diff --git a/src/TensorFlowNET.Core/Operations/control_flow_ops.cs b/src/TensorFlowNET.Core/Operations/control_flow_ops.cs index 862b636fd..efd9aba35 100644 --- a/src/TensorFlowNET.Core/Operations/control_flow_ops.cs +++ b/src/TensorFlowNET.Core/Operations/control_flow_ops.cs @@ -675,16 +675,17 @@ public static Tensor ZerosLikeOutsideLoop(Operation op, int index) } } - public static Tensor[] while_loop(Func cond, - Func body, - Tensor[] loop_vars, + public static Tensors while_loop(Func cond, + Func body, + Tensors loop_vars, int parallel_iterations = 10, string name = null) { var executing_eagerly = tf.Context.executing_eagerly(); if (!executing_eagerly) { - throw new NotImplementedException(""); + return while_v2.while_loop(cond, body, loop_vars, parallel_iterations: parallel_iterations, + name: name); } return tf_with(ops.name_scope("name", "while"), delegate diff --git a/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs b/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs index c88911194..536d4e3c2 100644 --- a/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs +++ b/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs @@ -16,12 +16,20 @@ limitations under the License. 
using System; using System.Linq; +using Tensorflow.Functions; +using Tensorflow.Graphs; using Tensorflow.Operations; +using static Tensorflow.Binding; namespace Tensorflow { public class control_flow_util { + public static readonly bool ENABLE_CONTROL_FLOW_V2 = !string.IsNullOrEmpty(Environment.GetEnvironmentVariable("TF_ENABLE_CONTROL_FLOW_V2")) && Environment.GetEnvironmentVariable("TF_ENABLE_CONTROL_FLOW_V2") != "0" || + (!string.IsNullOrEmpty(Environment.GetEnvironmentVariable("TF_ENABLE_CONTROL_FLOW_V2")) && Environment.GetEnvironmentVariable("TF_ENABLE_CONTROL_FLOW_V2") != "0") || + (!string.IsNullOrEmpty(Environment.GetEnvironmentVariable("TF_ENABLE_COND_V2")) && Environment.GetEnvironmentVariable("TF_ENABLE_COND_V2") != "0") || + (!string.IsNullOrEmpty(Environment.GetEnvironmentVariable("TF_ENABLE_WHILE_V2")) && Environment.GetEnvironmentVariable("TF_ENABLE_WHILE_V2") != "0") || + (!string.IsNullOrEmpty(Environment.GetEnvironmentVariable("TF_ENABLE_TENSOR_ARRAY_V2")) && Environment.GetEnvironmentVariable("TF_ENABLE_TENSOR_ARRAY_V2") != "0"); /// /// Return true if `op` is an Exit. /// @@ -196,5 +204,74 @@ public static WhileContext GetContainingWhileContext(ControlFlowContext ctxt, Co } return null; } + + public static bool EnableControlFlowV2(Graph graph) + { + return ENABLE_CONTROL_FLOW_V2 || graph.building_function && (graph is not FuncGraph func || func.captures.Length == 0); + + } + + public static string create_new_tf_function(FuncGraph func_graph) + { + var func = new EagerDefinedFunction(func_graph.Name, func_graph, func_graph.Inputs, func_graph.Outputs, new Dictionary()); + func.AddToGraph(func_graph); + return func_graph.Name; + } + + public static (Operation, Tensor[]) get_op_and_outputs(Tensor[] inputs) + { + if(inputs.Length == 0) + { + return (null, new Tensor[0]); + } + else + { + return (inputs[0], inputs); + } + } + + public static Tensor[] run_as_function_for_tape_gradients(Func make_op, Tensor[] inputs) + { + if(gradients_util.PossibleTapeGradientTypes(inputs) == gradients_util.POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER + && !(ops.get_default_graph().building_function)) + { + throw new NotImplementedException(); + } + else + { + return make_op(inputs); + } + } + + public static string unique_fn_name(string scope, string name) + { + return $"{scope}{name}_{ops.uid()}".Replace("/", "_"); + } + + public static bool output_all_intermediates() + { + if (in_defun()) + { + return false; + } + if(tf.Context.FunctionCallOptions.ExecutorType == "SINGLE_THREADED_EXECUTOR") + { + return false; + } + // TODO(Rinne): check this after refactoring keras building. + return false; + } + + public static bool in_defun() + { + if (tf.Context.executing_eagerly()) + { + return false; + } + + var graph = ops.get_default_graph(); + // TODO(Rinne): CondBranchFuncGraph, WhileBodyFuncGraph, WhileCondFuncGraph + return graph is FuncGraph; + } } } diff --git a/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs b/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs index 5663f9c97..e1cf1c138 100644 --- a/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs @@ -1,128 +1,1032 @@ -using System; -using System.Collections.Generic; -using System.Text; -using System.Xml.Linq; -using Tensorflow.Contexts; +/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! 
Do not edit.*/ + using Tensorflow.Eager; -using Tensorflow.Functions; +using Tensorflow.Contexts; using static Tensorflow.Binding; -namespace Tensorflow.Operations +namespace Tensorflow; + +public static class gen_functional_ops { - public class gen_functional_ops + /// + /// An n-way switch statement which calls a single branch function. + /// + /// + /// + /// An n-way switch statement, implementing the following: + /// ``` + /// switch (branch_index) { + /// case 0: + /// output = branches[0](input); + /// break; + /// case 1: + /// output = branches[1](input); + /// break; + /// ... + /// case [[nbranches-1]]: + /// default: + /// output = branches[nbranches-1](input); + /// break; + /// } + /// ``` + /// + /// + /// + /// + /// + /// A list of output types. + /// + /// + /// + /// A list of functions each of which takes 'inputs' and returns a list of + /// tensors, whose types are the same as what every other branch returns. + /// + /// + /// + /// + public static Tensor[] _case(Tensor branch_index, Tensors input, TF_DataType[] Tout, object[] branches, Shape[] output_shapes, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Case", name) { args = new object[] { branch_index, input }, attrs = new Dictionary() { ["Tout"] = Tout, ["branches"] = branches, ["output_shapes"] = output_shapes } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return case_eager_fallback(branch_index, input, Tout: Tout, branches: branches, output_shapes: output_shapes, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["branch_index"] = branch_index; + keywords["input"] = input; + keywords["Tout"] = Tout; + keywords["branches"] = branches; + keywords["output_shapes"] = output_shapes; + var _op = tf.OpDefLib._apply_op_helper("Case", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "branches", _op.get_attr("branches"), "output_shapes", _op.get_attr("output_shapes") }; + _execute.record_gradient("Case", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] case_eager_fallback(Tensor branch_index, Tensor input, TF_DataType[] Tout, object[] branches, Shape[] output_shapes, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { branch_index, input }; + object[] _attrs = new object[] { "branches", branches, "output_shapes", output_shapes }; + var _result = _execute.execute("Case", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Case", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Return the index of device the op runs. + /// + /// + /// + /// Given a list of device names, this operation returns the index of the device + /// this op runs. The length of the list is returned in two cases: + /// (1) Device does not exist in the given device list. + /// (2) It is in XLA compilation. + /// + /// + /// + /// + public static Tensor device_index(string[] device_names, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DeviceIndex", name) { args = new object[] { }, attrs = new Dictionary() { ["device_names"] = device_names } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return device_index_eager_fallback(device_names: device_names, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["device_names"] = device_names; + var _op = tf.OpDefLib._apply_op_helper("DeviceIndex", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "device_names", _op.get_attr("device_names") }; + _execute.record_gradient("DeviceIndex", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor device_index_eager_fallback(string[] device_names, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "device_names", device_names }; + var _result = _execute.execute("DeviceIndex", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DeviceIndex", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// ~~%~~ This op is used as a placeholder in If branch functions. It doesn't provide a~~%~~ valid output when run, so must either be removed (e.g. replaced with a~~%~~ function input) or guaranteed not to be used (e.g. if mirroring an~~%~~ intermediate output needed for the gradient computation of the other branch).~~%~~ + /// + /// + /// The type of the output. + /// + /// + /// + /// The purported shape of the output. This is only used for shape inference; + /// the output will not necessarily have this shape. Can be a partial shape. + /// + /// + /// + public static Tensor fake_param(TF_DataType dtype, Shape shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeParam", name) { args = new object[] { }, attrs = new Dictionary() { ["dtype"] = dtype, ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fake_param_eager_fallback(dtype: dtype, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["dtype"] = dtype; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("FakeParam", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape") }; + _execute.record_gradient("FakeParam", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fake_param_eager_fallback(TF_DataType dtype, Shape shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "dtype", dtype, "shape", shape }; + var _result = _execute.execute("FakeParam", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeParam", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Applies a for loop. 
+ /// + /// + /// + /// ```python + /// output = input; + /// for i in range(start, limit, delta) + /// output = body(i, output); + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// + /// A function that takes a list of tensors (int32, T) and returns another + /// list of tensors (T). + /// + /// + /// + public static Tensor[] _for(Tensor start, Tensor limit, Tensor delta, Tensors input, object body, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "For", name) { args = new object[] { start, limit, delta, input }, attrs = new Dictionary() { ["body"] = body } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return for_eager_fallback(start, limit, delta, input, body: body, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["start"] = start; + keywords["limit"] = limit; + keywords["delta"] = delta; + keywords["input"] = input; + keywords["body"] = body; + var _op = tf.OpDefLib._apply_op_helper("For", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op.get_attr("T"), "body", _op.get_attr("body") }; + _execute.record_gradient("For", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] for_eager_fallback(Tensor start, Tensor limit, Tensor delta, Tensor input, object body, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { start, limit, delta, input }; + object[] _attrs = new object[] { "body", body }; + var _result = _execute.execute("For", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("For", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// output = cond ? then_branch(input) : else_branch(input) + /// + /// + /// + /// + /// A list of output types. + /// + /// + /// + /// A function that takes 'inputs' and returns a list of tensors, whose + /// types are the same as what else_branch returns. + /// + /// + /// + /// + /// A function that takes 'inputs' and returns a list of tensors, whose + /// types are the same as what then_branch returns. + /// + /// + /// + /// + public static Tensor[] _if(Tensor cond, Tensors input, TF_DataType[] Tout, object then_branch, object else_branch, Shape[] output_shapes, string? 
name = null) { - public static Tensor[] partitioned_call(Tensors args, TF_DataType[] tout, EagerDefinedFunction f, - string config = "", string config_proto = "", string executor_type = "", string name = null) + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - var ctx = tf.Context; - if (ctx.executing_eagerly()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "If", name) { args = new object[] { cond, input }, attrs = new Dictionary() { ["Tout"] = Tout, ["then_branch"] = then_branch, ["else_branch"] = else_branch, ["output_shapes"] = output_shapes } }); + return _fast_path_result; + } + catch (Exception) { - try - { - return tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "PartitionedCall", name, - args, tout, f, config, config_proto, executor_type)); - } - catch (Exception) - { + } + try + { + return if_eager_fallback(cond, input, Tout: Tout, then_branch: then_branch, else_branch: else_branch, output_shapes: output_shapes, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["cond"] = cond; + keywords["input"] = input; + keywords["Tout"] = Tout; + keywords["then_branch"] = then_branch; + keywords["else_branch"] = else_branch; + keywords["output_shapes"] = output_shapes; + var _op = tf.OpDefLib._apply_op_helper("If", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tcond", _op._get_attr_type("Tcond"), "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "then_branch", _op.get_attr("then_branch"), "else_branch", _op.get_attr("else_branch"), "output_shapes", _op.get_attr("output_shapes") }; + _execute.record_gradient("If", _op.inputs, _attrs, _result); + } + return _result; + } - } + public static Tensor[] if_eager_fallback(Tensor cond, Tensor input, TF_DataType[] Tout, object then_branch, object else_branch, Shape[] output_shapes, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { cond, input }; + object[] _attrs = new object[] { "Tcond", cond.dtype, "then_branch", then_branch, "else_branch", else_branch, "output_shapes", output_shapes }; + var _result = _execute.execute("If", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("If", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// returns `f(inputs)`, where `f`'s body is placed and partitioned. + /// + /// + /// + /// Asynchronously executes a function, potentially across multiple devices but + /// within a single process. The kernel places and partitions a given function's + /// underlying graph, and executes each of the partitioned subgraphs as a function. + /// + /// + /// + /// + /// A list of output types. + /// + /// + /// + /// A function that takes 'args', a list of tensors, and returns 'output', + /// another list of tensors. Input and output types are specified by 'Tin' + /// and 'Tout'. The function body of f will be placed and partitioned across + /// devices, setting this op apart from the regular Call op. + /// + /// + /// + /// + /// + /// + public static Tensor[] partitioned_call(Tensors args, TF_DataType[] Tout, object f, string config = "", string config_proto = "", string executor_type = "", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PartitionedCall", name) { args = new object[] { args }, attrs = new Dictionary() { ["Tout"] = Tout, ["f"] = f, ["config"] = config, ["config_proto"] = config_proto, ["executor_type"] = executor_type } }); + return _fast_path_result; } + catch (Exception) + { + } + try + { + return partitioned_call_eager_fallback(args, Tout: Tout, f: f, config: config, config_proto: config_proto, executor_type: executor_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (config is null) + { + config = ""; + } + if (config_proto is null) + { + config_proto = ""; + } + if (executor_type is null) + { + executor_type = ""; + } + Dictionary keywords = new(); + keywords["args"] = args; + keywords["Tout"] = Tout; + keywords["f"] = f; + keywords["config"] = config; + keywords["config_proto"] = config_proto; + keywords["executor_type"] = executor_type; + var _op = tf.OpDefLib._apply_op_helper("PartitionedCall", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "f", _op.get_attr("f"), "config", _op.get_attr("config"), "config_proto", _op.get_attr("config_proto"), "executor_type", _op.get_attr("executor_type") }; + _execute.record_gradient("PartitionedCall", _op.inputs, _attrs, _result); + } + return _result; + } - if (config is null) + public static Tensor[] partitioned_call_eager_fallback(Tensor args, TF_DataType[] Tout, object f, string config, string config_proto, string executor_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { args }; + object[] _attrs = new object[] { "f", f, "config", config, "config_proto", config_proto, "executor_type", executor_type }; + var _result = _execute.execute("PartitionedCall", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("PartitionedCall", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Runs function `f` on a remote device indicated by `target`. + /// + /// + /// + /// + /// + /// The type list for the return values. + /// + /// + /// + /// + /// The function to run remotely. + /// + /// + /// + public static Tensor[] remote_call(Tensor target, Tensors args, TF_DataType[] Tout, object f, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try { - config = ""; + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RemoteCall", name) { args = new object[] { target, args }, attrs = new Dictionary() { ["Tout"] = Tout, ["f"] = f } }); + return _fast_path_result; } - if (config_proto is null) + catch (Exception) { - config_proto = ""; } - if (executor_type is null) + try { - executor_type = ""; + return remote_call_eager_fallback(target, args, Tout: Tout, f: f, name: name, ctx: _ctx); } - Dictionary kwargs = new(); - kwargs["args"] = args; - kwargs["Tout"] = tout; - kwargs["f"] = f; - kwargs["config"] = config; - kwargs["config_proto"] = config_proto; - kwargs["executor_type"] = executor_type; - var output = tf.OpDefLib._apply_op_helper("PartitionedCall", - name, kwargs); - var result = output.outputs; - if (_execute.must_record_gradient()) + catch (Exception) { - throw new NotImplementedException(); } - return result; } + Dictionary keywords = new(); + keywords["target"] = target; + keywords["args"] = args; + keywords["Tout"] = Tout; + keywords["f"] = f; + var _op = tf.OpDefLib._apply_op_helper("RemoteCall", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "f", _op.get_attr("f") }; + _execute.record_gradient("RemoteCall", _op.inputs, _attrs, _result); + } + return _result; + } - public static Tensor[] partitioned_call_eager_fallback(Tensors args, TF_DataType[] tout, EagerDefinedFunction f, - string config, string config_proto, string executor_type, string name, Context ctx) + public static Tensor[] remote_call_eager_fallback(Tensor target, Tensor args, TF_DataType[] Tout, object f, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { target, args }; + object[] _attrs = new object[] { "f", f }; + var _result = _execute.execute("RemoteCall", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RemoteCall", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// returns `f(inputs)`, where `f`'s body is placed and partitioned. + /// + /// + /// + /// A list of output types. + /// + /// + /// + /// A function that takes 'args', a list of tensors, and returns 'output', + /// another list of tensors. Input and output types are specified by 'Tin' + /// and 'Tout'. The function body of f will be placed and partitioned across + /// devices, setting this op apart from the regular Call op. This op is + /// stateful. + /// + /// + /// + /// + /// + /// + public static Tensor[] stateful_partitioned_call(Tensors args, TF_DataType[] Tout, object f, string config = "", string config_proto = "", string executor_type = "", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - // TODO(Rinne): implement it. 
- throw new NotImplementedException(); - if(config is null) + try { - config = ""; + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StatefulPartitionedCall", name) { args = new object[] { args }, attrs = new Dictionary() { ["Tout"] = Tout, ["f"] = f, ["config"] = config, ["config_proto"] = config_proto, ["executor_type"] = executor_type } }); + return _fast_path_result; } - if(config_proto is null) + catch (Exception) { - config_proto = ""; } - if(executor_type is null) + try { - executor_type = ""; + return stateful_partitioned_call_eager_fallback(args, Tout: Tout, f: f, config: config, config_proto: config_proto, executor_type: executor_type, name: name, ctx: _ctx); } - object[] attrs = new object[] + catch (Exception) { + } + } + if (config is null) + { + config = ""; + } + if (config_proto is null) + { + config_proto = ""; + } + if (executor_type is null) + { + executor_type = ""; + } + Dictionary keywords = new(); + keywords["args"] = args; + keywords["Tout"] = Tout; + keywords["f"] = f; + keywords["config"] = config; + keywords["config_proto"] = config_proto; + keywords["executor_type"] = executor_type; + var _op = tf.OpDefLib._apply_op_helper("StatefulPartitionedCall", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "f", _op.get_attr("f"), "config", _op.get_attr("config"), "config_proto", _op.get_attr("config_proto"), "executor_type", _op.get_attr("executor_type") }; + _execute.record_gradient("StatefulPartitionedCall", _op.inputs, _attrs, _result); + } + return _result; + } - }; + public static Tensor[] stateful_partitioned_call_eager_fallback(Tensor args, TF_DataType[] Tout, object f, string config, string config_proto, string executor_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { args }; + object[] _attrs = new object[] { "f", f, "config", config, "config_proto", config_proto, "executor_type", executor_type }; + var _result = _execute.execute("StatefulPartitionedCall", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("StatefulPartitionedCall", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// An n-way switch statement which calls a single branch function. + /// + /// + /// + /// An n-way switch statement, implementing the following: + /// ``` + /// switch (branch_index) { + /// case 0: + /// output = branches[0](input); + /// break; + /// case 1: + /// output = branches[1](input); + /// break; + /// ... + /// case [[nbranches-1]]: + /// default: + /// output = branches[nbranches-1](input); + /// break; + /// } + /// ``` + /// + /// This should only be used when the none of branches has stateful ops. + /// + /// + /// + /// + /// + /// A list of output types. + /// + /// + /// + /// A list of functions each of which takes 'inputs' and returns a list of + /// tensors, whose types are the same as what every other branch returns. + /// + /// + /// + /// + public static Tensor[] stateless_case(Tensor branch_index, Tensors input, TF_DataType[] Tout, object[] branches, Shape[] output_shapes, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StatelessCase", name) { args = new object[] { branch_index, input }, attrs = new Dictionary() { ["Tout"] = Tout, ["branches"] = branches, ["output_shapes"] = output_shapes } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return stateless_case_eager_fallback(branch_index, input, Tout: Tout, branches: branches, output_shapes: output_shapes, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["branch_index"] = branch_index; + keywords["input"] = input; + keywords["Tout"] = Tout; + keywords["branches"] = branches; + keywords["output_shapes"] = output_shapes; + var _op = tf.OpDefLib._apply_op_helper("StatelessCase", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "branches", _op.get_attr("branches"), "output_shapes", _op.get_attr("output_shapes") }; + _execute.record_gradient("StatelessCase", _op.inputs, _attrs, _result); } + return _result; + } - public static Tensor[] symbolic_gradient(Tensor[] input, TF_DataType[] Tout, NameAttrList f, string name = null) + public static Tensor[] stateless_case_eager_fallback(Tensor branch_index, Tensor input, TF_DataType[] Tout, object[] branches, Shape[] output_shapes, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { branch_index, input }; + object[] _attrs = new object[] { "branches", branches, "output_shapes", output_shapes }; + var _result = _execute.execute("StatelessCase", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - var ctx = tf.Context; - if (ctx.executing_eagerly()) + _execute.record_gradient("StatelessCase", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// output = cond ? then_branch(input) : else_branch(input) + /// + /// + /// + /// + /// A list of output types. + /// + /// + /// + /// A function that takes 'inputs' and returns a list of tensors, whose + /// types are the same as what else_branch returns. + /// + /// + /// + /// + /// A function that takes 'inputs' and returns a list of tensors, whose + /// types are the same as what then_branch returns. + /// + /// + /// + /// + public static Tensor[] stateless_if(Tensor cond, Tensors input, TF_DataType[] Tout, object then_branch, object else_branch, Shape[] output_shapes, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StatelessIf", name) { args = new object[] { cond, input }, attrs = new Dictionary() { ["Tout"] = Tout, ["then_branch"] = then_branch, ["else_branch"] = else_branch, ["output_shapes"] = output_shapes } }); + return _fast_path_result; + } + catch (Exception) + { + } + try { - try - { - var _result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo( - tf.Context, "SymbolicGradient", name, input, Tout, f)); - return _result; - } - catch (Exception) - { + return stateless_if_eager_fallback(cond, input, Tout: Tout, then_branch: then_branch, else_branch: else_branch, output_shapes: output_shapes, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["cond"] = cond; + keywords["input"] = input; + keywords["Tout"] = Tout; + keywords["then_branch"] = then_branch; + keywords["else_branch"] = else_branch; + keywords["output_shapes"] = output_shapes; + var _op = tf.OpDefLib._apply_op_helper("StatelessIf", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tcond", _op._get_attr_type("Tcond"), "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "then_branch", _op.get_attr("then_branch"), "else_branch", _op.get_attr("else_branch"), "output_shapes", _op.get_attr("output_shapes") }; + _execute.record_gradient("StatelessIf", _op.inputs, _attrs, _result); + } + return _result; + } - } + public static Tensor[] stateless_if_eager_fallback(Tensor cond, Tensor input, TF_DataType[] Tout, object then_branch, object else_branch, Shape[] output_shapes, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { cond, input }; + object[] _attrs = new object[] { "Tcond", cond.dtype, "then_branch", then_branch, "else_branch", else_branch, "output_shapes", output_shapes }; + var _result = _execute.execute("StatelessIf", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("StatelessIf", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// output = input; While (Cond(output)) { output = Body(output) } + /// + /// + /// + /// + /// A function takes 'input' and returns a tensor. If the tensor is + /// a scalar of non-boolean, the scalar is converted to a boolean + /// according to the following rule: if the scalar is a numerical + /// value, non-zero means True and zero means False; if the scalar is + /// a string, non-empty means True and empty means False. If the + /// tensor is not a scalar, non-emptiness means True and False + /// otherwise. + /// + /// This should only be used when the while condition and body functions + /// do not have stateful ops. + /// + /// + /// + /// + /// A function that takes a list of tensors and returns another + /// list of tensors. Both lists have the same types as specified + /// by T. + /// + /// + /// + /// + /// + public static Tensor[] stateless_while(Tensors input, object cond, object body, Shape[] output_shapes, int parallel_iterations = 10, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StatelessWhile", name) { args = new object[] { input }, attrs = new Dictionary() { ["cond"] = cond, ["body"] = body, ["output_shapes"] = output_shapes, ["parallel_iterations"] = parallel_iterations } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return stateless_while_eager_fallback(input, cond: cond, body: body, output_shapes: output_shapes, parallel_iterations: parallel_iterations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["cond"] = cond; + keywords["body"] = body; + keywords["output_shapes"] = output_shapes; + keywords["parallel_iterations"] = parallel_iterations; + var _op = tf.OpDefLib._apply_op_helper("StatelessWhile", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op.get_attr("T"), "cond", _op.get_attr("cond"), "body", _op.get_attr("body"), "output_shapes", _op.get_attr("output_shapes"), "parallel_iterations", _op._get_attr_int("parallel_iterations") }; + _execute.record_gradient("StatelessWhile", _op.inputs, _attrs, _result); + } + return _result; + } - try - { - return symbolic_gradient_eager_fallback(input, Tout, f, name, ctx); - } - catch (Exception) - { + public static Tensor[] stateless_while_eager_fallback(Tensor input, object cond, object body, Shape[] output_shapes, int parallel_iterations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "cond", cond, "body", body, "output_shapes", output_shapes, "parallel_iterations", parallel_iterations }; + var _result = _execute.execute("StatelessWhile", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("StatelessWhile", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes the gradient function for function f via backpropagation. + /// + /// + /// + /// + /// the type list for the input list. + /// + /// + /// + /// + /// The function we want to compute the gradient for. + /// + /// The function 'f' must be a numerical function which takes N inputs and + /// produces M outputs. Its gradient function 'g', which is computed by + /// this SymbolicGradient op is a function taking N + M inputs and + /// produces N outputs. + /// + /// I.e. if we have + /// (y1, y2, ..., y_M) = f(x1, x2, ..., x_N), + /// then, g is + /// (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N, + /// dL/dy1, dL/dy2, ..., dL/dy_M), + /// + /// where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the + /// loss function). dL/dx_i is the partial derivative of L with respect + /// to x_i. + /// + /// (Needs some math expert to say the comment above better.) + /// + /// + /// + public static Tensor[] symbolic_gradient(Tensors input, TF_DataType[] Tout, object f, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SymbolicGradient", name) { args = new object[] { input }, attrs = new Dictionary() { ["Tout"] = Tout, ["f"] = f } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return symbolic_gradient_eager_fallback(input, Tout: Tout, f: f, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["Tout"] = Tout; + keywords["f"] = f; + var _op = tf.OpDefLib._apply_op_helper("SymbolicGradient", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "f", _op.get_attr("f") }; + _execute.record_gradient("SymbolicGradient", _op.inputs, _attrs, _result); + } + return _result; + } - } + public static Tensor[] symbolic_gradient_eager_fallback(Tensor input, TF_DataType[] Tout, object f, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "f", f }; + var _result = _execute.execute("SymbolicGradient", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SymbolicGradient", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Converts a tensor to a scalar predicate. + /// + /// + /// + /// Converts a tensor to a scalar predicate with the following rules: + /// + /// - For 0D tensors, truthiness is determined by comparing against a "zero" + /// value. For numerical types it is the obvious zero. For strings it is the + /// empty string. + /// + /// - For >0D tensors, truthiness is determined by looking at the number of + /// elements. If has zero elements, then the result is false. Otherwise the + /// result is true. + /// + /// This matches the behavior of If and While for determining if a tensor counts + /// as true/false for a branch condition. + /// + /// + /// + /// + public static Tensor to_bool(Tensor input, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ToBool", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { } - var op = tf.OpDefLib._apply_op_helper("SymbolicGradient", name, new object[] { input, Tout, f }); - var result = op.outputs; - if (_execute.must_record_gradient()) + try { - throw new NotImplementedException(); + return to_bool_eager_fallback(input, name: name, ctx: _ctx); } - return result; + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("ToBool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("ToBool", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor[] symbolic_gradient_eager_fallback(Tensor[] input, TF_DataType[] Tout, NameAttrList f, string name, Context ctx) + public static Tensor to_bool_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("ToBool", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ToBool", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// output = input; While (Cond(output)) { output = Body(output) } + /// + /// + /// + /// + /// A function takes 'input' and returns a tensor. If the tensor is + /// a scalar of non-boolean, the scalar is converted to a boolean + /// according to the following rule: if the scalar is a numerical + /// value, non-zero means True and zero means False; if the scalar is + /// a string, non-empty means True and empty means False. If the + /// tensor is not a scalar, non-emptiness means True and False + /// otherwise. + /// + /// + /// + /// + /// A function that takes a list of tensors and returns another + /// list of tensors. Both lists have the same types as specified + /// by T. + /// + /// + /// + /// + /// + public static Tensor[] _while(Tensors input, object cond, object body, Shape[] output_shapes, int parallel_iterations = 10, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - object[] attrs = new object[] { "Tin", input, "Tout", Tout, "f", f }; - var result = _execute.execute("SymbolicGradient", Tout.Length, input, attrs, ctx, name); - if (_execute.must_record_gradient()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "While", name) { args = new object[] { input }, attrs = new Dictionary() { ["cond"] = cond, ["body"] = body, ["output_shapes"] = output_shapes, ["parallel_iterations"] = parallel_iterations } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return while_eager_fallback(input, cond: cond, body: body, output_shapes: output_shapes, parallel_iterations: parallel_iterations, name: name, ctx: _ctx); + } + catch (Exception) { - throw new NotImplementedException(); } - return result; } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["cond"] = cond; + keywords["body"] = body; + keywords["output_shapes"] = output_shapes; + keywords["parallel_iterations"] = parallel_iterations; + var _op = tf.OpDefLib._apply_op_helper("While", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op.get_attr("T"), "cond", _op.get_attr("cond"), "body", _op.get_attr("body"), "output_shapes", _op.get_attr("output_shapes"), "parallel_iterations", _op._get_attr_int("parallel_iterations") }; + _execute.record_gradient("While", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] while_eager_fallback(Tensor input, object cond, object body, Shape[] output_shapes, int parallel_iterations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "cond", cond, "body", body, "output_shapes", output_shapes, "parallel_iterations", parallel_iterations }; + var _result = _execute.execute("While", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("While", _inputs_flat, _attrs, _result); + } + return _result; } } diff --git a/src/TensorFlowNET.Core/Operations/gen_list_ops.cs b/src/TensorFlowNET.Core/Operations/gen_list_ops.cs new file mode 100644 index 000000000..e72539866 --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/gen_list_ops.cs @@ -0,0 +1,1227 @@ +/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/ + +using Tensorflow.Eager; +using Tensorflow.Contexts; +using static Tensorflow.Binding; + +namespace Tensorflow; + +public static class gen_list_ops +{ + /// + /// Creates and returns an empty tensor list. + /// + /// + /// + /// All list elements must be tensors of dtype element_dtype and shape compatible + /// with element_shape. + /// + /// handle: an empty tensor list. + /// element_dtype: the type of elements in the list. + /// element_shape: a shape compatible with that of elements in the list. + /// + /// + /// + /// + /// + /// + public static Tensor empty_tensor_list(Tensor element_shape, Tensor max_num_elements, TF_DataType element_dtype, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EmptyTensorList", name) { args = new object[] { element_shape, max_num_elements }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return empty_tensor_list_eager_fallback(element_shape, max_num_elements, element_dtype: element_dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["element_shape"] = element_shape; + keywords["max_num_elements"] = max_num_elements; + keywords["element_dtype"] = element_dtype; + var _op = tf.OpDefLib._apply_op_helper("EmptyTensorList", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") }; + _execute.record_gradient("EmptyTensorList", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor empty_tensor_list_eager_fallback(Tensor element_shape, Tensor max_num_elements, TF_DataType element_dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { element_shape, max_num_elements }; + object[] _attrs = new object[] { "element_dtype", element_dtype, "shape_type", element_shape.dtype }; + var _result = _execute.execute("EmptyTensorList", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("EmptyTensorList", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Concats all tensors in the list along the 0th dimension. + /// + /// + /// + /// Requires that all tensors have the same shape except the first dimension. + /// + /// input_handle: The input list. + /// tensor: The concated result. + /// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] tensor_list_concat(Tensor input_handle, TF_DataType element_dtype, Shape element_shape = null, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListConcat", name) { args = new object[] { input_handle }, attrs = new Dictionary() { ["element_dtype"] = element_dtype, ["element_shape"] = element_shape } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return tensor_list_concat_eager_fallback(input_handle, element_dtype: element_dtype, element_shape: element_shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handle"] = input_handle; + keywords["element_dtype"] = element_dtype; + keywords["element_shape"] = element_shape; + var _op = tf.OpDefLib._apply_op_helper("TensorListConcat", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "element_shape", _op.get_attr("element_shape") }; + _execute.record_gradient("TensorListConcat", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] tensor_list_concat_eager_fallback(Tensor input_handle, TF_DataType element_dtype, Shape element_shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handle }; + object[] _attrs = new object[] { "element_dtype", element_dtype, "element_shape", element_shape }; + var _result = _execute.execute("TensorListConcat", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListConcat", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor tensor_list_concat_lists(Tensor input_a, Tensor input_b, TF_DataType element_dtype, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListConcatLists", name) { args = new object[] { input_a, input_b }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_concat_lists_eager_fallback(input_a, input_b, element_dtype: element_dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_a"] = input_a; + keywords["input_b"] = input_b; + keywords["element_dtype"] = element_dtype; + var _op = tf.OpDefLib._apply_op_helper("TensorListConcatLists", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") }; + _execute.record_gradient("TensorListConcatLists", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_concat_lists_eager_fallback(Tensor input_a, Tensor input_b, TF_DataType element_dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_a, input_b }; + object[] _attrs = new object[] { "element_dtype", element_dtype }; + var _result = _execute.execute("TensorListConcatLists", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListConcatLists", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Concats all tensors in the list along the 0th dimension. + /// + /// + /// + /// Requires that all tensors have the same shape except the first dimension. + /// + /// input_handle: The input list. + /// element_shape: The shape of the uninitialized elements in the list. If the first + /// dimension is not -1, it is assumed that all list elements have the same + /// leading dim. + /// leading_dims: The list of leading dims of uninitialized list elements. Used if + /// the leading dim of input_handle.element_shape or the element_shape input arg + /// is not already set. + /// tensor: The concated result. + /// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] tensor_list_concat_v2(Tensor input_handle, Tensor element_shape, Tensor leading_dims, TF_DataType element_dtype, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListConcatV2", name) { args = new object[] { input_handle, element_shape, leading_dims }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return tensor_list_concat_v2_eager_fallback(input_handle, element_shape, leading_dims, element_dtype: element_dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handle"] = input_handle; + keywords["element_shape"] = element_shape; + keywords["leading_dims"] = leading_dims; + keywords["element_dtype"] = element_dtype; + var _op = tf.OpDefLib._apply_op_helper("TensorListConcatV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") }; + _execute.record_gradient("TensorListConcatV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] tensor_list_concat_v2_eager_fallback(Tensor input_handle, Tensor element_shape, Tensor leading_dims, TF_DataType element_dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handle, element_shape, leading_dims }; + object[] _attrs = new object[] { "element_dtype", element_dtype, "shape_type", element_shape.dtype }; + var _result = _execute.execute("TensorListConcatV2", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListConcatV2", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// The shape of the elements of the given list, as a tensor. + /// + /// + /// + /// input_handle: the list + /// element_shape: the shape of elements of the list + /// + /// + /// + /// + /// + public static Tensor tensor_list_element_shape(Tensor input_handle, TF_DataType shape_type, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListElementShape", name) { args = new object[] { input_handle }, attrs = new Dictionary() { ["shape_type"] = shape_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_element_shape_eager_fallback(input_handle, shape_type: shape_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handle"] = input_handle; + keywords["shape_type"] = shape_type; + var _op = tf.OpDefLib._apply_op_helper("TensorListElementShape", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "shape_type", _op._get_attr_type("shape_type") }; + _execute.record_gradient("TensorListElementShape", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_element_shape_eager_fallback(Tensor input_handle, TF_DataType shape_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handle }; + object[] _attrs = new object[] { "shape_type", shape_type }; + var _result = _execute.execute("TensorListElementShape", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListElementShape", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Creates a TensorList which, when stacked, has the value of `tensor`. + /// + /// + /// + /// Each tensor in the result list corresponds to one row of the input tensor. + /// + /// tensor: The input tensor. + /// output_handle: The list. + /// + /// + /// + /// + /// + public static Tensor tensor_list_from_tensor(Tensor tensor, Tensor element_shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListFromTensor", name) { args = new object[] { tensor, element_shape }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_from_tensor_eager_fallback(tensor, element_shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["element_shape"] = element_shape; + var _op = tf.OpDefLib._apply_op_helper("TensorListFromTensor", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") }; + _execute.record_gradient("TensorListFromTensor", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_from_tensor_eager_fallback(Tensor tensor, Tensor element_shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, element_shape }; + object[] _attrs = new object[] { "element_dtype", tensor.dtype, "shape_type", element_shape.dtype }; + var _result = _execute.execute("TensorListFromTensor", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListFromTensor", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Creates a Tensor by indexing into the TensorList. 
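+ /// 
+ /// A hypothetical usage sketch (not part of the generated wrapper; the literal values and
+ /// variable names are illustrative assumptions): build a list from a tensor with
+ /// tensor_list_from_tensor, then gather elements back out by index.
+ /// <code>
+ /// var source = tf.constant(new float[] { 1f, 2f, 3f });   // three scalar elements
+ /// var unknown_shape = tf.constant(-1);                     // -1 means "element shape unspecified"
+ /// var handle = gen_list_ops.tensor_list_from_tensor(source, unknown_shape);
+ /// var picked = gen_list_ops.tensor_list_gather(handle, tf.constant(new int[] { 2, 0 }),
+ ///     unknown_shape, TF_DataType.TF_FLOAT);                // expected value: [3, 1]
+ /// </code>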
+ /// + /// + /// + /// Each row in the produced Tensor corresponds to the element in the TensorList + /// specified by the given index (see `tf.gather`). + /// + /// input_handle: The input tensor list. + /// indices: The indices used to index into the list. + /// values: The tensor. + /// + /// + /// + /// + /// + /// + /// + public static Tensor tensor_list_gather(Tensor input_handle, Tensor indices, Tensor element_shape, TF_DataType element_dtype, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListGather", name) { args = new object[] { input_handle, indices, element_shape }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_gather_eager_fallback(input_handle, indices, element_shape, element_dtype: element_dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handle"] = input_handle; + keywords["indices"] = indices; + keywords["element_shape"] = element_shape; + keywords["element_dtype"] = element_dtype; + var _op = tf.OpDefLib._apply_op_helper("TensorListGather", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") }; + _execute.record_gradient("TensorListGather", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_gather_eager_fallback(Tensor input_handle, Tensor indices, Tensor element_shape, TF_DataType element_dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handle, indices, element_shape }; + object[] _attrs = new object[] { "element_dtype", element_dtype }; + var _result = _execute.execute("TensorListGather", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListGather", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor tensor_list_get_item(Tensor input_handle, Tensor index, Tensor element_shape, TF_DataType element_dtype, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListGetItem", name) { args = new object[] { input_handle, index, element_shape }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_get_item_eager_fallback(input_handle, index, element_shape, element_dtype: element_dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handle"] = input_handle; + keywords["index"] = index; + keywords["element_shape"] = element_shape; + keywords["element_dtype"] = element_dtype; + var _op = tf.OpDefLib._apply_op_helper("TensorListGetItem", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") }; + _execute.record_gradient("TensorListGetItem", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_get_item_eager_fallback(Tensor input_handle, Tensor index, Tensor element_shape, TF_DataType element_dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handle, index, element_shape }; + object[] _attrs = new object[] { "element_dtype", element_dtype }; + var _result = _execute.execute("TensorListGetItem", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListGetItem", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the number of tensors in the input tensor list. + /// + /// + /// + /// input_handle: the input list + /// length: the number of tensors in the list + /// + /// + /// + /// + public static Tensor tensor_list_length(Tensor input_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListLength", name) { args = new object[] { input_handle }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_length_eager_fallback(input_handle, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handle"] = input_handle; + var _op = tf.OpDefLib._apply_op_helper("TensorListLength", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("TensorListLength", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_length_eager_fallback(Tensor input_handle, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handle }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("TensorListLength", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListLength", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the last element of the input list as well as a list with all but that element. + /// + /// + /// + /// Fails if the list is empty. 
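+ /// 
+ /// A hypothetical push/pop round trip (illustrative assumptions only, not generated output):
+ /// <code>
+ /// var list = gen_list_ops.empty_tensor_list(tf.constant(-1), tf.constant(-1), TF_DataType.TF_FLOAT);
+ /// list = gen_list_ops.tensor_list_push_back(list, tf.constant(42f));
+ /// var len = gen_list_ops.tensor_list_length(list);          // 1
+ /// var popped = gen_list_ops.tensor_list_pop_back(list, tf.constant(-1), TF_DataType.TF_FLOAT);
+ /// // popped[0]: the shortened list handle, popped[1]: the withdrawn tensor (42)
+ /// </code>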
+ /// + /// input_handle: the input list + /// tensor: the withdrawn last element of the list + /// element_dtype: the type of elements in the list + /// element_shape: the shape of the output tensor + /// + /// + /// + /// + /// + /// + public static Tensor[] tensor_list_pop_back(Tensor input_handle, Tensor element_shape, TF_DataType element_dtype, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListPopBack", name) { args = new object[] { input_handle, element_shape }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return tensor_list_pop_back_eager_fallback(input_handle, element_shape, element_dtype: element_dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handle"] = input_handle; + keywords["element_shape"] = element_shape; + keywords["element_dtype"] = element_dtype; + var _op = tf.OpDefLib._apply_op_helper("TensorListPopBack", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") }; + _execute.record_gradient("TensorListPopBack", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] tensor_list_pop_back_eager_fallback(Tensor input_handle, Tensor element_shape, TF_DataType element_dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handle, element_shape }; + object[] _attrs = new object[] { "element_dtype", element_dtype }; + var _result = _execute.execute("TensorListPopBack", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListPopBack", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`. + /// + /// + /// + /// tensor: The tensor to put on the list. + /// input_handle: The old list. + /// output_handle: A list with the elements of the old list followed by tensor. + /// element_dtype: the type of elements in the list. + /// element_shape: a shape compatible with that of elements in the list. + /// + /// + /// + /// + /// + public static Tensor tensor_list_push_back(Tensor input_handle, Tensor tensor, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListPushBack", name) { args = new object[] { input_handle, tensor }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_push_back_eager_fallback(input_handle, tensor, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handle"] = input_handle; + keywords["tensor"] = tensor; + var _op = tf.OpDefLib._apply_op_helper("TensorListPushBack", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") }; + _execute.record_gradient("TensorListPushBack", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_push_back_eager_fallback(Tensor input_handle, Tensor tensor, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handle, tensor }; + object[] _attrs = new object[] { "element_dtype", tensor.dtype }; + var _result = _execute.execute("TensorListPushBack", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListPushBack", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + public static Tensor tensor_list_push_back_batch(Tensor input_handles, Tensor tensor, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListPushBackBatch", name) { args = new object[] { input_handles, tensor }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_push_back_batch_eager_fallback(input_handles, tensor, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handles"] = input_handles; + keywords["tensor"] = tensor; + var _op = tf.OpDefLib._apply_op_helper("TensorListPushBackBatch", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") }; + _execute.record_gradient("TensorListPushBackBatch", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_push_back_batch_eager_fallback(Tensor input_handles, Tensor tensor, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handles, tensor }; + object[] _attrs = new object[] { "element_dtype", tensor.dtype }; + var _result = _execute.execute("TensorListPushBackBatch", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListPushBackBatch", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// List of the given size with empty elements. + /// + /// + /// + /// element_shape: the shape of the future elements of the list + /// num_elements: the number of elements to reserve + /// handle: the output list + /// element_dtype: the desired type of elements in the list. 
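+ /// 
+ /// A hypothetical reserve/set/stack sketch (illustrative assumptions only):
+ /// <code>
+ /// var list = gen_list_ops.tensor_list_reserve(tf.constant(-1), tf.constant(2), TF_DataType.TF_FLOAT);
+ /// list = gen_list_ops.tensor_list_set_item(list, tf.constant(0), tf.constant(1f));
+ /// list = gen_list_ops.tensor_list_set_item(list, tf.constant(1), tf.constant(2f));
+ /// var stacked = gen_list_ops.tensor_list_stack(list, tf.constant(-1), TF_DataType.TF_FLOAT, 2);
+ /// </code>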
+ /// + /// + /// + /// + /// + /// + public static Tensor tensor_list_reserve(Tensor element_shape, Tensor num_elements, TF_DataType element_dtype, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListReserve", name) { args = new object[] { element_shape, num_elements }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_reserve_eager_fallback(element_shape, num_elements, element_dtype: element_dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["element_shape"] = element_shape; + keywords["num_elements"] = num_elements; + keywords["element_dtype"] = element_dtype; + var _op = tf.OpDefLib._apply_op_helper("TensorListReserve", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") }; + _execute.record_gradient("TensorListReserve", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_reserve_eager_fallback(Tensor element_shape, Tensor num_elements, TF_DataType element_dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { element_shape, num_elements }; + object[] _attrs = new object[] { "element_dtype", element_dtype, "shape_type", element_shape.dtype }; + var _result = _execute.execute("TensorListReserve", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListReserve", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Resizes the list. + /// + /// + /// + /// + /// input_handle: the input list + /// size: size of the output list + /// + /// + /// + /// + /// + /// + public static Tensor tensor_list_resize(Tensor input_handle, Tensor size, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListResize", name) { args = new object[] { input_handle, size }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_resize_eager_fallback(input_handle, size, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handle"] = input_handle; + keywords["size"] = size; + var _op = tf.OpDefLib._apply_op_helper("TensorListResize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("TensorListResize", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_resize_eager_fallback(Tensor input_handle, Tensor size, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handle, size }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("TensorListResize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListResize", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Creates a TensorList by indexing into a Tensor. 
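+ /// 
+ /// A hypothetical scatter sketch (illustrative assumptions only):
+ /// <code>
+ /// var rows = tf.constant(new float[] { 10f, 20f, 30f });
+ /// var list = gen_list_ops.tensor_list_scatter(rows, tf.constant(new int[] { 2, 0, 1 }), tf.constant(-1));
+ /// // element i of the list now holds rows[j] where indices[j] == i
+ /// </code>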
+ /// + /// + /// + /// Each member of the TensorList corresponds to one row of the input tensor, + /// specified by the given index (see `tf.gather`). + /// + /// tensor: The input tensor. + /// indices: The indices used to index into the list. + /// element_shape: The shape of the elements in the list (can be less specified than + /// the shape of the tensor). + /// output_handle: The TensorList. + /// + /// + /// + /// + /// + /// + public static Tensor tensor_list_scatter(Tensor tensor, Tensor indices, Tensor element_shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListScatter", name) { args = new object[] { tensor, indices, element_shape }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_scatter_eager_fallback(tensor, indices, element_shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["indices"] = indices; + keywords["element_shape"] = element_shape; + var _op = tf.OpDefLib._apply_op_helper("TensorListScatter", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") }; + _execute.record_gradient("TensorListScatter", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_scatter_eager_fallback(Tensor tensor, Tensor indices, Tensor element_shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, indices, element_shape }; + object[] _attrs = new object[] { "element_dtype", tensor.dtype, "shape_type", element_shape.dtype }; + var _result = _execute.execute("TensorListScatter", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListScatter", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Scatters tensor at indices in an input list. + /// + /// + /// + /// Each member of the TensorList corresponds to one row of the input tensor, + /// specified by the given index (see `tf.gather`). + /// + /// input_handle: The list to scatter into. + /// tensor: The input tensor. + /// indices: The indices used to index into the list. + /// output_handle: The TensorList. + /// + /// + /// + /// + /// + /// + public static Tensor tensor_list_scatter_into_existing_list(Tensor input_handle, Tensor tensor, Tensor indices, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListScatterIntoExistingList", name) { args = new object[] { input_handle, tensor, indices }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_scatter_into_existing_list_eager_fallback(input_handle, tensor, indices, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handle"] = input_handle; + keywords["tensor"] = tensor; + keywords["indices"] = indices; + var _op = tf.OpDefLib._apply_op_helper("TensorListScatterIntoExistingList", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") }; + _execute.record_gradient("TensorListScatterIntoExistingList", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_scatter_into_existing_list_eager_fallback(Tensor input_handle, Tensor tensor, Tensor indices, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handle, tensor, indices }; + object[] _attrs = new object[] { "element_dtype", tensor.dtype }; + var _result = _execute.execute("TensorListScatterIntoExistingList", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListScatterIntoExistingList", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Creates a TensorList by indexing into a Tensor. + /// + /// + /// + /// Each member of the TensorList corresponds to one row of the input tensor, + /// specified by the given index (see `tf.gather`). + /// + /// tensor: The input tensor. + /// indices: The indices used to index into the list. + /// element_shape: The shape of the elements in the list (can be less specified than + /// the shape of the tensor). + /// num_elements: The size of the output list. Must be large enough to accommodate + /// the largest index in indices. If -1, the list is just large enough to include + /// the largest index in indices. + /// output_handle: The TensorList. + /// + /// + /// + /// + /// + /// + /// + public static Tensor tensor_list_scatter_v2(Tensor tensor, Tensor indices, Tensor element_shape, Tensor num_elements, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListScatterV2", name) { args = new object[] { tensor, indices, element_shape, num_elements }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_scatter_v2_eager_fallback(tensor, indices, element_shape, num_elements, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["indices"] = indices; + keywords["element_shape"] = element_shape; + keywords["num_elements"] = num_elements; + var _op = tf.OpDefLib._apply_op_helper("TensorListScatterV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") }; + _execute.record_gradient("TensorListScatterV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_scatter_v2_eager_fallback(Tensor tensor, Tensor indices, Tensor element_shape, Tensor num_elements, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, indices, element_shape, num_elements }; + object[] _attrs = new object[] { "element_dtype", tensor.dtype, "shape_type", element_shape.dtype }; + var _result = _execute.execute("TensorListScatterV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListScatterV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor tensor_list_set_item(Tensor input_handle, Tensor index, Tensor item, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListSetItem", name) { args = new object[] { input_handle, index, item }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_set_item_eager_fallback(input_handle, index, item, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handle"] = input_handle; + keywords["index"] = index; + keywords["item"] = item; + var _op = tf.OpDefLib._apply_op_helper("TensorListSetItem", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") }; + _execute.record_gradient("TensorListSetItem", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_set_item_eager_fallback(Tensor input_handle, Tensor index, Tensor item, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handle, index, item }; + object[] _attrs = new object[] { "element_dtype", item.dtype }; + var _result = _execute.execute("TensorListSetItem", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListSetItem", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Splits a tensor into a list. + /// + /// + /// + /// list[i] corresponds to lengths[i] tensors from the input tensor. 
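+ /// 
+ /// A hypothetical split sketch (illustrative values; the op definition expects int64 lengths):
+ /// <code>
+ /// var data = tf.constant(new float[] { 1f, 2f, 3f, 4f, 5f });
+ /// var list = gen_list_ops.tensor_list_split(data, tf.constant(-1), tf.constant(new long[] { 2, 3 }));
+ /// // list[0] holds [1, 2], list[1] holds [3, 4, 5]
+ /// var n = gen_list_ops.tensor_list_length(list);             // 2
+ /// </code>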
+ /// The tensor must have rank at least 1 and contain exactly sum(lengths) elements. + /// + /// tensor: The input tensor. + /// element_shape: A shape compatible with that of elements in the tensor. + /// lengths: Vector of sizes of the 0th dimension of tensors in the list. + /// output_handle: The list. + /// + /// + /// + /// + /// + /// + public static Tensor tensor_list_split(Tensor tensor, Tensor element_shape, Tensor lengths, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListSplit", name) { args = new object[] { tensor, element_shape, lengths }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_split_eager_fallback(tensor, element_shape, lengths, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["element_shape"] = element_shape; + keywords["lengths"] = lengths; + var _op = tf.OpDefLib._apply_op_helper("TensorListSplit", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") }; + _execute.record_gradient("TensorListSplit", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_split_eager_fallback(Tensor tensor, Tensor element_shape, Tensor lengths, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, element_shape, lengths }; + object[] _attrs = new object[] { "element_dtype", tensor.dtype, "shape_type", element_shape.dtype }; + var _result = _execute.execute("TensorListSplit", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListSplit", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Stacks all tensors in the list. + /// + /// + /// + /// Requires that all tensors have the same shape. + /// + /// input_handle: the input list + /// tensor: the gathered result + /// num_elements: optional. If not -1, the number of elements in the list. + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor tensor_list_stack(Tensor input_handle, Tensor element_shape, TF_DataType element_dtype, int num_elements = -1, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListStack", name) { args = new object[] { input_handle, element_shape }, attrs = new Dictionary() { ["element_dtype"] = element_dtype, ["num_elements"] = num_elements } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_list_stack_eager_fallback(input_handle, element_shape, element_dtype: element_dtype, num_elements: num_elements, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input_handle"] = input_handle; + keywords["element_shape"] = element_shape; + keywords["element_dtype"] = element_dtype; + keywords["num_elements"] = num_elements; + var _op = tf.OpDefLib._apply_op_helper("TensorListStack", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "num_elements", _op._get_attr_int("num_elements") }; + _execute.record_gradient("TensorListStack", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_list_stack_eager_fallback(Tensor input_handle, Tensor element_shape, TF_DataType element_dtype, int num_elements, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_handle, element_shape }; + object[] _attrs = new object[] { "element_dtype", element_dtype, "num_elements", num_elements }; + var _result = _execute.execute("TensorListStack", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorListStack", _inputs_flat, _attrs, _result); + } + return _result[0]; + } +} diff --git a/src/TensorFlowNET.Core/Operations/list_ops.cs b/src/TensorFlowNET.Core/Operations/list_ops.cs new file mode 100644 index 000000000..c5e83ee41 --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/list_ops.cs @@ -0,0 +1,111 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Eager; + +namespace Tensorflow.Operations +{ + internal class list_ops + { + private static void _set_handle_data(Tensor list_handle, Shape element_shape, TF_DataType element_dtype) + { + if(list_handle is EagerTensor eagerTensor) + { + var handle_data = new CppShapeInferenceResult.Types.HandleData(); + handle_data.IsSet = true; + handle_data.ShapeAndType.Add(new CppShapeInferenceResult.Types.HandleShapeAndType() + { + Shape = element_shape.as_proto(), + Dtype = element_dtype.as_datatype_enum(), + Type = new FullTypeDef() { TypeId = FullTypeId.TftArray } + }); + list_handle.HandleData = handle_data; + } + } + + private static Tensor _build_element_shape(Shape? shape) + { + if(shape is null || shape.IsNull) + { + return ops.convert_to_tensor(-1); + } + else + { + return ops.convert_to_tensor(shape); + } + } + + public static Tensor tensor_list_reserve(Shape? shape, Tensor num_elements, TF_DataType element_dtype, string name = null) + { + var result = gen_list_ops.tensor_list_reserve(_build_element_shape(shape), num_elements, element_dtype, name); + _set_handle_data(result, shape, element_dtype); + return result; + } + + public static Tensor tensor_list_from_tensor(Tensor tensor, Shape element_shape, string? 
name = null) + { + var result = gen_list_ops.tensor_list_from_tensor(tensor, _build_element_shape(element_shape), name); + _set_handle_data(result, tensor.shape, tensor.dtype); + return result; + } + + public static Tensor tensor_list_get_item(Tensor input_handle, Tensor index, TF_DataType element_dtype, + Shape? element_shape = null, string? name = null) + { + return gen_list_ops.tensor_list_get_item(input_handle, index, _build_element_shape(element_shape), + element_dtype, name); + } + + public static Tensor tensor_list_set_item(Tensor input_handle, Tensor index, Tensor item, + bool resize_if_index_out_of_bounds = false, string? name = null) + { + if (resize_if_index_out_of_bounds) + { + var input_list_size = gen_list_ops.tensor_list_length(input_handle); + input_handle = control_flow_ops.cond(index >= input_list_size, + () => gen_list_ops.tensor_list_resize(input_handle, index + 1), + () => input_handle); + } + var output_handle = gen_list_ops.tensor_list_set_item(input_handle, index, item, name); + handle_data_util.copy_handle_data(input_handle, output_handle); + return output_handle; + } + + public static Tensor tensor_list_stack(Tensor input_handle, TF_DataType element_dtype, int num_elements = -1, + Shape? element_shape = null, string? name = null) + { + return gen_list_ops.tensor_list_stack(input_handle, _build_element_shape(element_shape), element_dtype, num_elements, name); + } + + public static Tensor tensor_list_gather(Tensor input_handle, Tensor indices, TF_DataType element_dtype, + Shape? element_shape = null, string? name = null) + { + return gen_list_ops.tensor_list_gather(input_handle, indices, _build_element_shape(element_shape), element_dtype, name); + } + + public static Tensor tensor_list_scatter(Tensor tensor, Tensor indices, Shape? element_shape = null, Tensor? input_handle = null, + string? name = null) + { + if(input_handle is not null) + { + var output_handle = gen_list_ops.tensor_list_scatter_into_existing_list(input_handle, tensor, indices, name); + handle_data_util.copy_handle_data(input_handle, output_handle); + return output_handle; + } + else + { + var output_handle = gen_list_ops.tensor_list_scatter_v2(tensor, indices, _build_element_shape(element_shape), + constant_op.constant(-1), name); + _set_handle_data(output_handle, element_shape, tensor.dtype); + return output_handle; + } + } + + public static Tensor empty_tensor_list(Shape? element_shape, TF_DataType element_dtype, int max_num_elements = -1, + string? name = null) + { + return gen_list_ops.empty_tensor_list(_build_element_shape(element_shape), element_dtype: element_dtype, + max_num_elements: ops.convert_to_tensor(max_num_elements, dtype: dtypes.int32), name: name); + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/tensor_array_ops.cs b/src/TensorFlowNET.Core/Operations/tensor_array_ops.cs index 7d2da544c..6be0706c2 100644 --- a/src/TensorFlowNET.Core/Operations/tensor_array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/tensor_array_ops.cs @@ -13,11 +13,23 @@ public class tensor_array_ops /// public static TensorArray build_ta_with_new_flow(TensorArray old_ta, Tensor flow) { - var new_ta = tf.TensorArray( - dtype: old_ta.dtype, - infer_shape: old_ta.infer_shape, + if (!tf.Context.executing_eagerly() && old_ta is not _GraphTensorArrayV2 && control_flow_util.EnableControlFlowV2(ops.get_default_graph())) + { + throw new NotImplementedException("Attempting to build a graph-mode TF2-style " + + "TensorArray from either an eager-mode " + + "TensorArray or a TF1-style TensorArray. 
" + + "This is not currently supported. You may be " + + "attempting to capture a TensorArray " + + "inside a tf.function or tf.data map function. " + + "Instead, construct a new TensorArray inside " + + "the function."); + } + var new_ta = TensorArray.Create(old_ta.dtype, handle: old_ta.handle, flow: flow, infer_shape: old_ta.infer_shape, colocate_with_first_write_call: old_ta.colocate_with_first_write_call); - + new_ta._dynamic_size = old_ta._dynamic_size; + new_ta._size = old_ta._size; + new_ta._colocate_with = old_ta._colocate_with; + new_ta._element_shape = old_ta._element_shape; return new_ta; } diff --git a/src/TensorFlowNET.Core/Operations/while_v2.cs b/src/TensorFlowNET.Core/Operations/while_v2.cs new file mode 100644 index 000000000..7ee3e9e8d --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/while_v2.cs @@ -0,0 +1,401 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using Tensorflow.Common.Extensions; +using Tensorflow.Common.Types; +using Tensorflow.Eager; +using Tensorflow.Framework; +using Tensorflow.Framework.Models; +using Tensorflow.Graphs; +using static Tensorflow.Binding; + +namespace Tensorflow.Operations +{ + class _OperationWithOutputs : Operation + { + public _OperationWithOutputs(IntPtr handle, Graph g = null) + { + _handle = handle; + _graph = g; + _outputs = null; + g._add_op(this); + } + } + internal class while_v2 + { + public static Tensor[] while_loop(Func cond, + Func body, + Tensors loop_vars, + int maximum_iterations = -1, + int parallel_iterations = 10, + string name = null, + bool back_prop = true, + bool return_same_structure = true) + { + var orig_loop_vars = loop_vars; + var flat_orig_loop_vars = orig_loop_vars.Flatten().ToArray(); + int len_orig_loop_vars = orig_loop_vars.Length; + + loop_vars = _tensor_array_to_flow(loop_vars); + loop_vars = Nest.MapStructure(x => _convert_to_tensor_or_indexed_slices(x, TF_DataType.DtInvalid, null), loop_vars).ToTensors(); + + var loop_vars_signature = Nest.MapStructure(x => new TensorSpec(x.shape, x.dtype), _tensor_array_to_flow(loop_vars)); + + var flat_shape_invariants = Nest.Flatten(loop_vars_signature).Select(x => x.shape).ToArray(); + + if(string.IsNullOrEmpty(name)) + { + name = "while"; + } + + return tf_with(ops.name_scope(name), nameScopeWhile => + { + string scope = (nameScopeWhile as ops.NameScope).scope_name; + string cond_name = control_flow_util.unique_fn_name(scope, "cond"); + string body_name = control_flow_util.unique_fn_name(scope, "body"); + + var maximum_iterations_loop_var = _build_maximum_iterations_loop_var(maximum_iterations); + var loop_counter = constant_op.constant(0, maximum_iterations == -1 ? TF_DataType.DtInvalid : maximum_iterations_loop_var.dtype, + name: "loop_counter"); + loop_vars = new Tensor[] { loop_counter, maximum_iterations_loop_var }.Concat(loop_vars).ToArray(); + + var func_graph_signature = new TensorSpec[] {TensorSpec.FromTensor(loop_counter),TensorSpec.FromTensor(maximum_iterations_loop_var)} + .Concat(loop_vars_signature.Flatten()).ToArray(); + + // TODO(Rinne): possible wrong implemenation here. 
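// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: the loop that while_v2 lowers
// to is conceptually "evaluate cond over a flat state vector, run body to get
// the next state vector, repeat", where the state vector is
// (loop_counter, maximum_iterations, *loop_vars). The names below are plain
// .NET stand-ins invented for illustration, not TensorFlow.NET APIs; the real
// implementation builds cond/body FuncGraphs and emits a single While op.
using System;
using System.Linq;

static class FunctionalWhileSketch
{
    // Threads the whole state array through cond and body on every iteration.
    public static int[] Run(Func<int[], bool> cond, Func<int[], int[]> body, int[] loopVars)
    {
        var state = loopVars.ToArray();
        while (cond(state))
            state = body(state);
        return state;
    }

    public static void Demo()
    {
        // state = (counter, max_iterations, x)
        var result = Run(
            cond: s => s[0] < s[1],
            body: s => new[] { s[0] + 1, s[1], s[2] * 2 },
            loopVars: new[] { 0, 5, 1 });
        Console.WriteLine(string.Join(", ", result)); // prints "5, 5, 32"
    }
}
// ---------------------------------------------------------------------------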
+ var add_control_dependencies = false; + + object[] wrapped_cond(object[] inputs) + { + Tensor loop_counter = (Tensor)inputs[0]; + Tensor maximum_iterations_arg = (Tensor)inputs[1]; + Tensor[] args = inputs.Skip(2).Select(x => (Tensor)x).ToArray(); + var pred = cond(_pack_sequence_as(loop_vars_signature, flat_orig_loop_vars, args)); + if(pred.shape.IsNull || pred.shape.ndim > 0) + { + pred = array_ops.squeeze(pred); + } + if(maximum_iterations == -1) + { + return new object[] { pred }; + } + else + { + return new object[] { math_ops.logical_and(loop_counter < maximum_iterations_arg, pred) }; + } + } + + var cond_graph = FuncGraph.func_graph_from_func("cond", wrapped_cond, null, + null, signature: func_graph_signature, add_control_dependencies: add_control_dependencies); + + bool stateful_parallelism = false; + + object[] wrapped_body(object[] inputs) + { + Tensor loop_counter = (Tensor)inputs[0]; + Tensor maximum_iterations_arg = (Tensor)inputs[1]; + Tensor[] args = inputs.Skip(2).Select(x => (Tensor)x).ToArray(); + + _copy_handle_data(loop_vars.Flatten().Skip(2), args); + + foreach(var t in cond_graph.external_captures) + { + var graph = (FuncGraph)(ops.get_default_graph()); + graph.capture(t); + } + + var outputs = body(_pack_sequence_as(loop_vars_signature, flat_orig_loop_vars, args)); + outputs = _tensor_array_to_flow(outputs); + + return new object[] { loop_counter + 1, maximum_iterations_arg }.Concat(outputs).ToArray(); + } + + var body_graph = FuncGraph.func_graph_from_func("body", wrapped_body, null, null, func_graph_signature, + add_control_dependencies: add_control_dependencies, acd_record_initial_resource_uses: stateful_parallelism); + + // TODO(Rinne): possible wrong implementation here. + NestList loop_vars_list = new(new Tensors[] { loop_vars, body_graph.external_captures.ToTensors() }); + body_graph.Outputs.AddRange(body_graph.internal_captures); + + cond_graph.as_default(); + int num_cond_captures = cond_graph.external_captures.Length; + Debug.Assert(cond_graph.external_captures.SequenceEqual(body_graph.external_captures.Take(num_cond_captures).ToArray())); + _duplicate_body_captures_in_cond(cond_graph, body_graph.external_captures.Skip(num_cond_captures).ToArray()); + cond_graph.Exit(); + + int first_loop_var_index = 2; + + int num_flattened_oututs = orig_loop_vars.Length; + int num_original_outputs = body_graph.Outputs.Length; + if (back_prop && control_flow_util.output_all_intermediates()) + { + var intermediate_tensors = _get_intermediates(body_graph); + + foreach(var intermediate_tensor in intermediate_tensors) + { + var tensor_list = list_ops.empty_tensor_list(intermediate_tensor.shape, intermediate_tensor.dtype, maximum_iterations); + loop_vars_list.Values.Add(tensor_list); + + cond_graph.as_default(); + cond_graph.capture(tensor_list); + cond_graph.Exit(); + + body_graph.as_default(); + var appended_tensor_list = gen_ops.tensor_list_push_back(tensor_list, intermediate_tensor); + body_graph.Outputs.Add(appended_tensor_list); + body_graph.Exit(); + } + } + + List flattened_loop_vars = new(); + foreach(var item in loop_vars_list.Values) + { + flattened_loop_vars.AddRange(item.Flatten()); + } + // skip the check + + // TODO(Rinne): deal with control dependencies + var output_shapes = body_graph.Outputs.Select(t => t.shape).ToArray(); + var span = new Span(output_shapes).Slice(first_loop_var_index, num_flattened_oututs); + for(int i = 0; i < span.Length; i++) + { + span[i] = flat_shape_invariants[i]; + } + + Tensor[] outputs = 
_build_while_op(flattened_loop_vars.ToArray(), cond_graph, body_graph, output_shapes, parallel_iterations, + (nameScopeWhile as ops.NameScope).scope_name, num_original_outputs, stateful_parallelism); + + if (!ops.get_default_graph().building_function) + { + outputs = outputs.Select(t => array_ops.identity(t)).ToArray(); + } + + var output_loop_vars = outputs.Skip(first_loop_var_index).Take(num_flattened_oututs).ToArray(); + + if (!back_prop) + { + output_loop_vars = output_loop_vars.Select(t => array_ops.stop_gradient(t)).ToArray(); + } + outputs = _pack_sequence_as(loop_vars_signature, flat_orig_loop_vars, output_loop_vars); + + return outputs; + }); + } + + private static Tensors _tensor_array_to_flow(Tensors loop_vars) + { + if(loop_vars.NestType == NestType.Node) + { + if(loop_vars.NodeValue is FakeTensorByTensorArray fake) + { + return new Tensors(fake.TensorArray.flow); + } + else + { + return new Tensors(loop_vars.NodeValue!); + } + } + else if(loop_vars.NestType == NestType.List) + { + List> list = new(); + foreach(var item in loop_vars.ListValue!) + { + if(item.NestType == NestType.Node) + { + var nested = item.AsNest(); + if (nested.NodeValue is FakeTensorByTensorArray fake) + { + list.Add(new Nest(fake.TensorArray.flow)); + } + else + { + list.Add(new Nest(nested.NodeValue!)); + } + } + else + { + list.Add(new Nest(item.AsNest())); + } + } + return Tensors.FromNest(new Nest(list)); + } + else + { + throw new NotImplementedException(); + } + } + + private static Tensor[] _build_while_op(Tensor[] loop_vars, FuncGraph cond_graph, FuncGraph body_graph, + Shape[] output_shapes, int parallel_iterations, string name, int num_original_outputs, bool stateful_parallelism) + { + var cond_stateful_ops = cond_graph.get_operations().Select(x => x.op); + var body_stateful_ops = body_graph.get_operations().Select(x => x.op); + + bool is_stateful = cond_stateful_ops.Count() > 0 || body_stateful_ops.Count() > 0; + + Tensor[] _make_op(Tensor[] inputs) + { + Tensor[] outputs; + if (is_stateful) + { + outputs = gen_functional_ops._while( + inputs, + control_flow_util.create_new_tf_function(cond_graph), + control_flow_util.create_new_tf_function(body_graph), + output_shapes, + parallel_iterations, + name + ); + } + else + { + outputs = gen_functional_ops.stateless_while( + inputs, + control_flow_util.create_new_tf_function(cond_graph), + control_flow_util.create_new_tf_function(body_graph), + output_shapes, + parallel_iterations, + name + ); + } + var (while_op, tensors) = control_flow_util.get_op_and_outputs(outputs); + _copy_handle_data(body_graph.Outputs, tensors); + _set_read_only_resource_inputs_attr(while_op, new FuncGraph[]{cond_graph, body_graph}); + while_op._set_attr("_num_original_outputs", new AttrValue() { I = num_original_outputs }); + while_op._set_attr("_stateful_parallelism", new AttrValue() { B = stateful_parallelism }); + + cond_graph.outer_graph = ops.get_default_graph(); + body_graph.outer_graph = ops.get_default_graph(); + // TODO(Rinne): set the two graphs to while_op + return tensors; + } + + return control_flow_util.run_as_function_for_tape_gradients(_make_op, loop_vars); + } + + /// + /// Sets the list of resource inputs which are read-only. This is used by AutomaticControlDependencies. 
+ /// + /// + /// + private static void _set_read_only_resource_inputs_attr(Operation op, FuncGraph[] branch_graphs) + { + List read_only_indices = Enumerable.Range(0, op.inputs.Length).ToList(); + foreach(var branch_graph in branch_graphs) + { + if (read_only_indices.Count == 0) + { + break; + } + var branch_read_only_indices = auto_control_deps_utils.get_read_only_resource_input_indices_graph(branch_graph); + read_only_indices = read_only_indices.Intersect(branch_read_only_indices).ToList(); + } + AttrValue.Types.ListValue listValue = new(); + listValue.I.AddRange(read_only_indices.OrderBy(x => x).Select(x => (long)x)); + op._set_attr(auto_control_deps_utils.READ_ONLY_RESOURCE_INPUTS_ATTR, new AttrValue() + { + List = listValue + }); + } + + private static Tensors _pack_sequence_as(INestStructure loop_vars_signature, Tensor[] flat_orig_loop_vars, Tensor[] loop_vars) + { + var flattened_loop_vars = zip(loop_vars, flat_orig_loop_vars).Select<(Tensor, Tensor), Tensor>(item => + { + var (flow, y) = item; + if (y is FakeTensorByTensorArray ta) + { + return new FakeTensorByTensorArray(tensor_array_ops.build_ta_with_new_flow(ta.TensorArray, flow)); + } + else + { + return flow; + } + }).ToArray(); + return Nest.PackSequenceAs(loop_vars_signature, flattened_loop_vars).ToTensors(); + } + + private static Tensor[] _get_intermediates(FuncGraph func_graph) + { + List intermediates = new(); + var reversed_captures = func_graph.captures.ToDictionary(x => x.Item2, x => x.Item1); + + foreach(var op in func_graph.get_operations()) + { + Debug.Assert(op is Operation); + var oper = (Operation)op; + if(oper.type == "Identity" || oper.type == "MutexLock") + { + continue; + } + foreach(var o in op.outputs) + { + if(o != func_graph.Inputs[0] && o.dtype != dtypes.resource && !reversed_captures.ContainsKey(o)) + { + intermediates.Add(o); + } + } + } + return intermediates.ToArray(); + } + + private static void _duplicate_body_captures_in_cond(FuncGraph cond_graph, Tensor[] body_graph_captures) + { + var types = body_graph_captures.Select(t => t.dtype).ToList(); + var c_graph = cond_graph.c_graph; + var placeholders = types.Select(x => CreatePlaceholder(c_graph, _build_cond_placeholders_name_prefix(cond_graph), x)).ToList(); + + var placeholder_ops = placeholders.Select(ph => new _OperationWithOutputs(ph.oper, cond_graph)).ToList(); + + List tensors = new(); + foreach(var (op, ph, dtype) in zip(placeholder_ops, placeholders, types)) + { + var tensor = Tensor._create_with_tf_output(op, 0, dtype, ph); + op._outputs = new Tensor[] { tensor }; + tensors.Add(tensor); + } + + var tuples = zip(body_graph_captures, tensors).ToList(); + var keys = body_graph_captures.Select(t => t.Id).ToList(); + cond_graph._captures.Update(zip(keys, tuples).ToDictionary(x => x.Item1, x => x.Item2)); + cond_graph.Inputs.AddRange(tensors); + } + + private static TF_Output CreatePlaceholder(SafeGraphHandle graph, string name, TF_DataType dtype) + { + var desc = c_api.TF_NewOperation(graph, "Placeholder", name); + c_api.TF_SetAttrType(desc, "dtype", dtype); + var op = c_api.TF_FinishOperation(desc, tf.Status); + tf.Status.Check(true); + var output = new TF_Output(); + output.oper = op; + output.index = 0; + return output; + } + + private static string _build_cond_placeholders_name_prefix(FuncGraph cond_graph) + { + return cond_graph.unique_name(cond_graph.Name + "___redundant_placeholder"); + } + + private static Tensor _convert_to_tensor_or_indexed_slices(Tensor value, TF_DataType dtype, + string name) + { + return 
ops.convert_to_tensor(value, dtype, name, false); + } + + private static Tensor _build_maximum_iterations_loop_var(int maximum_iterations = -1) + { + return ops.convert_to_tensor(maximum_iterations, dtypes.int32, "maximum_iterations"); + } + + private static void _copy_handle_data(IEnumerable src_tensors, IEnumerable dst_tensors) + { + foreach(var (src_t, dst_t) in zip(src_tensors, dst_tensors)) + { + handle_data_util.copy_handle_data(src_t, dst_t); + } + } + } +} diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs index 498ffda76..e7ff9f748 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs @@ -105,6 +105,13 @@ public Tensor(Operation op, int value_index, TF_DataType dtype) _id = ops.uid(); } + internal static Tensor _create_with_tf_output(Operation op, int value_index, TF_DataType dtype, TF_Output tf_output) + { + Tensor ret = new Tensor(op, value_index, dtype); + ret._tf_output = tf_output; + return ret; + } + protected unsafe void InitTensor(Shape shape, TF_DataType dtype) { _handle = TF_NewTensor(shape, dtype, null); diff --git a/src/TensorFlowNET.Core/Tensors/TensorArray.cs b/src/TensorFlowNET.Core/Tensors/TensorArray.cs index fb59593ce..ff74956ac 100644 --- a/src/TensorFlowNET.Core/Tensors/TensorArray.cs +++ b/src/TensorFlowNET.Core/Tensors/TensorArray.cs @@ -14,7 +14,9 @@ You may obtain a copy of the License at limitations under the License. ******************************************************************************/ +using Tensorflow.Common.Types; using Tensorflow.Operations; +using static Tensorflow.Binding; namespace Tensorflow { @@ -44,5 +46,27 @@ public abstract class TensorArray : ITensorOrTensorArray public abstract Tensor stack(string name = null); public abstract Tensor gather(Tensor indices, string name = null); + + internal bool _dynamic_size; + internal Tensor _size; + internal List _colocate_with; + internal Shape _element_shape; + + public static TensorArray Create(TF_DataType dtype, Tensor size = null, bool dynamic_size = false, + bool clear_after_read = true, string tensor_array_name = null, Tensor handle = null, Tensor flow = null, + bool infer_shape = true, Shape? element_shape = null, + bool colocate_with_first_write_call = true, string name = null) + { + if (tf.Context.executing_eagerly() && (flow is null || flow.dtype != dtypes.variant)) + { + return new _EagerTensorArray(dtype, size, dynamic_size, clear_after_read, tensor_array_name, handle, flow, + infer_shape, element_shape, colocate_with_first_write_call, name); + } + else + { + return new _GraphTensorArrayV2(dtype, size, dynamic_size, clear_after_read, tensor_array_name, handle, flow, + infer_shape, element_shape, colocate_with_first_write_call, name); + } + } } } diff --git a/src/TensorFlowNET.Core/Tensors/Tensors.cs b/src/TensorFlowNET.Core/Tensors/Tensors.cs index 259b1eec7..38a3e5dce 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensors.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensors.cs @@ -4,6 +4,8 @@ using System.Collections.Generic; using System.Linq; using Tensorflow.Common.Types; +using Tensorflow.Operations; +using Tensorflow.Common.Extensions; namespace Tensorflow { @@ -58,7 +60,7 @@ public Tensor? 
SingleOrNull public Tensor this[params string[] slices] => this.First()[slices]; - private Tensors(Nest nested) : base(nested) + internal Tensors(Nest nested) : base(nested) { } @@ -68,9 +70,9 @@ public Tensors(params Tensor[] tensors): base(DealWithConstructorArrayInput(tens } - public Tensors(IEnumerable tensors): base(tensors.Select(x => new Nest(x))) + public Tensors(IList tensors) : base(tensors.Select(x => new Nest(x))) { - + } public Tensors(NDArray nd): base(ops.convert_to_tensor(nd)) @@ -78,6 +80,32 @@ public Tensors(NDArray nd): base(ops.convert_to_tensor(nd)) } + /// + /// Get the element in shallow level. For example, for ts = [1, [2, 3], 4], + /// common indexer has ts[1] = 2. Shallow indexer has ts[1] = [2, 3] + /// + /// + /// + public Tensors GetShallow(int index) + { + if(NestType == NestType.Node) + { + if(index > 0) + { + throw new IndexOutOfRangeException(); + } + return this; + } + else if(NestType == NestType.List) + { + return ListValue![index].AsNest().ToTensors(); + } + else + { + throw new NotImplementedException(); + } + } + private static Nest DealWithConstructorArrayInput(Tensor[] tensors) { if (tensors.Length == 0) @@ -115,8 +143,8 @@ public void Add(Tensor tensor) else if(NestType == NestType.Node) { NestType = NestType.List; - ListValue = new() { new Nest(Value), new Nest(tensor) }; - Value = null; + ListValue = new() { new Nest(NodeValue), new Nest(tensor) }; + NodeValue = null; } else if(NestType == NestType.List) { @@ -125,7 +153,7 @@ public void Add(Tensor tensor) else //Empty { NestType = NestType.Node; - Value = tensor; + NodeValue = tensor; } } @@ -140,9 +168,9 @@ public void AddRange(IEnumerable tensors) else if (NestType == NestType.Node) { NestType = NestType.List; - ListValue = new() { new Nest(Value) }; + ListValue = new() { new Nest(NodeValue) }; ListValue.AddRange(tensors.Select(x => new Nest(x))); - Value = null; + NodeValue = null; } else if(NestType == NestType.List) { @@ -151,7 +179,7 @@ public void AddRange(IEnumerable tensors) else // empty { NestType = NestType.List; - ListValue = tensors.Select(x => new Nest(x)).ToList(); + ListValue = tensors.Select(x => new Nest(x) as INestStructure).ToList(); } } @@ -166,9 +194,9 @@ public void Insert(int index, Tensor tensor) else if(NestType == NestType.Node) { NestType = NestType.List; - ListValue = new() { new Nest(Value) }; + ListValue = new() { new Nest(NodeValue) }; ListValue.Insert(index, new Nest(tensor)); - Value = null; + NodeValue = null; } else { @@ -283,7 +311,7 @@ public static implicit operator Tensor(Tensors? tensors) => tensors?.SingleOrNull; public static implicit operator Tensor[](Tensors tensors) - => tensors.Flatten().ToArray(); + => tensors.Flatten().ToArray(); #endregion public static Tensors? FromNest(Nest nested) @@ -298,7 +326,7 @@ public static implicit operator Tensor[](Tensors tensors) public void Deconstruct(out Tensor a, out Tensors? b) { a = this.First(); - b = Length == 1? null : new Tensors(this.Skip(1)); + b = Length == 1? 
null : new Tensors(this.Skip(1).ToArray()); } public override string ToString() diff --git a/src/TensorFlowNET.Core/ops.cs b/src/TensorFlowNET.Core/ops.cs index 6d1385ca4..fb9bccf31 100644 --- a/src/TensorFlowNET.Core/ops.cs +++ b/src/TensorFlowNET.Core/ops.cs @@ -576,7 +576,7 @@ public static bool inside_function() public static HandleData get_resource_handle_data(Tensor graph_op) { var handle_data = c_api.TFC_GetHandleShapeAndType(graph_op.graph.c_graph, graph_op._as_tf_output()); - return HandleData.Parser.ParseFrom(tf.compat.as_bytes(c_api.StringPiece(handle_data))); + return HandleData.Parser.ParseFrom(c_api.ByteStringPiece(handle_data)); } public static void dismantle_graph(Graph graph) diff --git a/src/TensorFlowNET.Keras/BackendImpl.cs b/src/TensorFlowNET.Keras/BackendImpl.cs index 1336e9af5..8dbcf90d5 100644 --- a/src/TensorFlowNET.Keras/BackendImpl.cs +++ b/src/TensorFlowNET.Keras/BackendImpl.cs @@ -25,6 +25,7 @@ limitations under the License. using static Tensorflow.Graphs.SubGraphUtility; using Tensorflow.Util; using Tensorflow.Common.Types; +using System.Diagnostics; namespace Tensorflow.Keras { @@ -485,7 +486,7 @@ Tensor swap_batch_timestep(Tensor input_t) var first_flatted_input = flatted_inptus[0]; var time_steps = first_flatted_input.shape[0]; var batch = first_flatted_input.shape[1]; - var time_steps_t = (int)first_flatted_input.shape[0]; + var time_steps_t = tf.shape(first_flatted_input)[0]; foreach (var input_ in flatted_inptus) { @@ -704,7 +705,7 @@ object _get_input_tensor(int time) var input_ta = new List(); for (int i = 0; i < flatted_inptus.Count; i++) { - input_ta.Add(tf.TensorArray(dtype: flatted_inptus[i].dtype, size: time_steps_t)); + input_ta.Add(TensorArray.Create(dtype: flatted_inptus[i].dtype, size: time_steps_t)); } foreach(var (ta, input_) in zip(input_ta, flatted_inptus)) @@ -730,18 +731,15 @@ object _get_input_tensor(int time) (output_time_zero, _) = step_function(input_time_zero, constants is null ? initial_states : initial_states.MergeWith(constants)); - int output_ta_size = return_all_outputs ? time_steps_t : 1; + Tensor output_ta_size = return_all_outputs ? time_steps_t : constant_op.constant(1); var output_ta = new List(); - for (int i = 0; i < output_time_zero.ToList().Count; i++) + foreach(var output in output_time_zero.Flatten()) { - var Out = output_time_zero.ToList()[i]; - output_ta.Add(tf.TensorArray(dtype: Out.dtype, size: output_ta_size, element_shape: Out.shape)); + output_ta.Add(TensorArray.Create(dtype: output.dtype, size: output_ta_size, element_shape: output.shape)); } var time = tf.constant(0, dtype: TF_DataType.TF_INT32, name: "time"); - - Func? masking_fn; Func? compute_masked_output = null; if (mask != null) @@ -750,7 +748,7 @@ object _get_input_tensor(int time) { mask = tf.reverse(mask, axis: new[] { 0 }); } - var mask_ta = tf.TensorArray(dtype: TF_DataType.TF_BOOL, size: time_steps_t); + var mask_ta = TensorArray.Create(dtype: TF_DataType.TF_BOOL, size: time_steps_t); mask_ta = mask_ta.unstack(mask); masking_fn = (time) => @@ -810,9 +808,9 @@ object _get_input_tensor(int time) masking_fn = null; } - Func cond = (time) => (time < time_steps_t); + Func cond = (time) => (time[0] < time_steps_t); int parallel_iterations = 32; - new_states = states; + Tensors final_outputs; if (masking_fn != null) { // Mask for the T output will be base on the output of T - 1. 
In the @@ -825,7 +823,7 @@ object _get_input_tensor(int time) var prev_output = flat_zero_output; var output_ta_t = output_ta; - Tensor _step(Tensor time) + Tensors _step(Tensors tensors) { /* RNN step function. @@ -838,23 +836,28 @@ RNN step function. Tuple(todo): `(time + 1, output_ta_t, output) + tuple(new_states)` */ + Tensor time = tensors[0]; + TensorArray output_ta_t = (tensors[1] as FakeTensorByTensorArray).TensorArray; + Tensors prev_output = tensors.GetShallow(2); + Tensors states = new Tensors(tensors.Skip(2 + prev_output.Length).ToArray()); + var flat_current_input = input_ta.Select(x => x.read(time)).ToList(); // maybe set shape // TODO(Wanglongzhi2001),deal with nest.pack_sequence_as's return type var current_input = Nest.PackSequenceAs(inputs, flat_current_input).ToTensors(); var mask_t = masking_fn(time); - var (output, new_states_internal) = step_function(current_input, new_states.MergeWith(constants)); + var (output, new_states) = step_function(current_input, states.MergeWith(constants)); // mask output var flat_output = Nest.Flatten(output).ToList(); - var flat_mask_output = zero_output_for_mask ? flat_zero_output : prev_output.ToList(); + var flat_mask_output = zero_output_for_mask ? flat_zero_output : prev_output.Flatten().ToList(); // TODO(Wanglongzhi2001),deal with compute_masked_output's third parameter's type var flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output); // mask states - var flat_state = states.ToList(); - var flat_new_state = new_states_internal.ToList(); + var flat_state = states.Flatten().ToList(); + var flat_new_state = new_states.Flatten().ToList(); foreach (var (state, new_state) in zip(flat_state, flat_new_state)) { @@ -865,38 +868,37 @@ RNN step function. } var flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state); - new_states_internal = Nest.PackSequenceAs(new_states, flat_final_state).ToTensors(); + new_states = Nest.PackSequenceAs(new_states, flat_final_state.ToArray()).ToTensors(); var ta_index_to_write = return_all_outputs ? 
time : tf.constant(0); - output_ta_t = zip(output_ta_t, flat_new_output).Select(item => - { - var (ta, out_) = item; - return ta.write(ta_index_to_write, out_); - }).ToList(); + Debug.Assert(flat_output.Count() == 1); + output_ta_t = output_ta_t.write(ta_index_to_write, flat_new_output.First()); - - new_states_internal = Nest.PackSequenceAs(initial_states, flat_new_state).ToTensors(); - - output_ta = output_ta_t; - new_states = new_states_internal; - return time + 1; + return new Tensor[] { time + 1, new FakeTensorByTensorArray(output_ta_t) }.Concat(flat_new_output).Concat(new_states) + .ToArray().ToTensors(); } - var final_outputs = tf.while_loop(cond: cond, body: _step, loop_vars: time, parallel_iterations: parallel_iterations); + var loop_vars = new Tensor[] { time + 1, new FakeTensorByTensorArray(output_ta[0]) } + .Concat(flat_zero_output.Flatten()).Concat(states).ToArray().ToTensors(); + final_outputs = control_flow_ops.while_loop(cond: cond, body: _step, loop_vars: loop_vars, parallel_iterations: parallel_iterations); + new_states = final_outputs.Skip(3).ToList(); } else { var output_ta_t = output_ta; new_states = states; - Tensor _step(Tensor time) + Tensors _step(Tensors tensors) { + Tensor time = tensors[0]; + TensorArray output_ta_t = (tensors[1] as FakeTensorByTensorArray).TensorArray; + Tensors states = new Tensors(tensors.Skip(2).ToArray()); var flat_current_input = input_ta.Select(x => x.read(time)).ToList(); // maybe set shape // TODO(Wanglongzhi2001),deal with nest.pack_sequence_as's return type var current_input = Nest.PackSequenceAs(inputs, flat_current_input).ToTensors(); - var (output, new_states_internal) = step_function(current_input, new_states.MergeWith(constants)); + var (output, new_states) = step_function(current_input, states.MergeWith(constants)); var flat_state = new_states.Flatten().ToList(); - var flat_new_state = new_states_internal.Flatten().ToList(); + var flat_new_state = new_states.Flatten().ToList(); foreach (var (state, new_state) in zip(flat_state, flat_new_state)) { if (new_state is Tensor) @@ -906,24 +908,23 @@ Tensor _step(Tensor time) } var flat_output = Nest.Flatten(output); var ta_index_to_write = return_all_outputs ? 
time : tf.constant(0); - output_ta_t = zip(output_ta_t, flat_output).Select(item => - { - var (ta, out_) = item; - return ta.write(ta_index_to_write, out_); - }).ToList(); - - new_states_internal = Nest.PackSequenceAs(initial_states, flat_new_state).ToTensors(); - output_ta = output_ta_t; - new_states = new_states_internal; - return time + 1; + Debug.Assert(flat_output.Count() == 1); + output_ta_t = output_ta_t.write(ta_index_to_write, flat_output.First()); + + new_states = Nest.PackSequenceAs(initial_states, flat_new_state).ToTensors(); + return new Tensor[] { time + 1, new FakeTensorByTensorArray(output_ta_t) }.Concat(new_states).ToArray().ToTensors(); } - var final_outputs = tf.while_loop(cond: cond, body: _step, loop_vars: time, parallel_iterations: parallel_iterations); + Debug.Assert(output_ta.Count == 1); + var loop_vars = new Tensor[] { time + 1, new FakeTensorByTensorArray(output_ta[0]) }.Concat(states).ToArray().ToTensors(); + final_outputs = control_flow_ops.while_loop(cond: cond, body: _step, loop_vars: loop_vars, parallel_iterations: parallel_iterations); + new_states = final_outputs.Skip(2).ToList(); } - outputs = outputs.MergeWith(output_ta.Select(o => o.stack()).ToTensors()); - last_output = last_output.MergeWith(outputs.Select(o => o[-1]).ToTensors()); - outputs = Nest.PackSequenceAs(output_time_zero, outputs).ToTensors(); - last_output = Nest.PackSequenceAs(output_time_zero, last_output).ToTensors(); + output_ta = new List { (final_outputs[1] as FakeTensorByTensorArray).TensorArray }; + outputs = outputs.MergeWith(output_ta.Select(o => o.stack()).ToArray().ToTensors()); + last_output = last_output.MergeWith(outputs.Select(o => o[-1]).ToArray().ToTensors()); + outputs = Nest.PackSequenceAs(output_time_zero, (Tensor[])outputs).ToTensors(); + last_output = Nest.PackSequenceAs(output_time_zero, (Tensor[])last_output).ToTensors(); } Func set_shape; diff --git a/src/TensorFlowNET.Keras/Engine/Model.Build.cs b/src/TensorFlowNET.Keras/Engine/Model.Build.cs index 69afdef90..233363832 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Build.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Build.cs @@ -23,7 +23,7 @@ public override void build(KerasShapesWrapper input_shape) var graph = tf.executing_eagerly() ? 
new FuncGraph("build_graph") : keras.backend.get_graph(); graph.as_default(); var shapes = input_shape.ToShapeArray(); - var x = new Tensors(shapes.Select(x => base_layer_utils.generate_placeholders_from_shape(x))); + var x = new Tensors(shapes.Select(x => base_layer_utils.generate_placeholders_from_shape(x)).ToArray()); try { Call(x, training: false); diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index 185de4f48..d807b2042 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -95,7 +95,7 @@ public Dictionary evaluate(IEnumerable x, NDArray y, int { var data_handler = new DataHandler(new DataHandlerArgs { - X = new Tensors(x), + X = new Tensors(x.ToArray()), Y = y, Model = this, StepsPerExecution = _steps_per_execution @@ -188,7 +188,7 @@ Dictionary test_step_multi_inputs_function(DataHandler data_handl { var data = iterator.next(); var x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount; - var outputs = train_step(data_handler, new Tensors(data.Take(x_size)), new Tensors(data.Skip(x_size))); + var outputs = train_step(data_handler, new Tensors(data.Take(x_size).ToArray()), new Tensors(data.Skip(x_size).ToArray())); tf_with(ops.control_dependencies(new object[0]), ctl => _train_counter.assign_add(1)); return outputs; } diff --git a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs index bb8e18ccf..76c592ad6 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs @@ -110,7 +110,7 @@ public ICallback fit(IEnumerable x, NDArray y, var data_handler = new DataHandler(new DataHandlerArgs { - X = new Tensors(train_x), + X = new Tensors(train_x.ToArray()), Y = train_y, BatchSize = batch_size, InitialEpoch = initial_epoch, diff --git a/src/TensorFlowNET.Keras/Engine/Model.Train.cs b/src/TensorFlowNET.Keras/Engine/Model.Train.cs index 905ea453a..48c16e181 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Train.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Train.cs @@ -21,7 +21,7 @@ Dictionary train_step_multi_inputs_function(DataHandler data_hand { var data = iterator.next(); var x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount; - var outputs = train_step(data_handler, new Tensors(data.Take(x_size)), new Tensors(data.Skip(x_size))); + var outputs = train_step(data_handler, new Tensors(data.Take(x_size).ToArray()), new Tensors(data.Skip(x_size).ToArray())); tf_with(ops.control_dependencies(new object[0]), ctl => _train_counter.assign_add(1)); return outputs; } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs index 78d3dac96..d2669cccf 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs @@ -4,10 +4,11 @@ using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; +using Tensorflow.Keras.Utils; namespace Tensorflow.Keras.Layers.Rnn { - public abstract class DropoutRNNCellMixin: RnnCellBase + public abstract class DropoutRNNCellMixin: Layer, IRnnCell { public float dropout; public float recurrent_dropout; @@ -17,6 +18,14 @@ public DropoutRNNCellMixin(LayerArgs args): base(args) } + public abstract GeneralizedTensorShape StateSize { get; } + public abstract GeneralizedTensorShape OutputSize { get; } + public abstract bool SupportOptionalArgs { 
get; } + public virtual Tensors GetInitialState(Tensors inputs, Tensor batch_size, TF_DataType dtype) + { + return RnnUtils.generate_zero_filled_state_for_cell(this, inputs, batch_size, dtype); + } + protected void _create_non_trackable_mask_cache() { diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs index 0ebd73628..77f7d927f 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs @@ -206,7 +206,6 @@ object get_state_spec(Shape shape) // append bacth dim state_spec_shape = new int[] { -1 }.concat(state_spec_shape); return new InputSpec(shape: state_spec_shape); - } // Check whether the input shape contains any nested shapes. It could be @@ -298,7 +297,7 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo // cell_call_fn = (self.cell.__call__ if callable(self.cell) else self.cell.call) Func step; - bool is_tf_rnn_cell = _cell.IsTFRnnCell; + bool is_tf_rnn_cell = false; if (constants is not null) { if (!_cell.SupportOptionalArgs) @@ -310,8 +309,8 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo step = (inputs, states) => { - constants = new Tensors(states.TakeLast(_num_constants)); - states = new Tensors(states.SkipLast(_num_constants)); + constants = new Tensors(states.TakeLast(_num_constants).ToArray()); + states = new Tensors(states.SkipLast(_num_constants).ToArray()); states = len(states) == 1 && is_tf_rnn_cell ? new Tensors(states[0]) : states; var (output, new_states) = _cell.Apply(inputs, states, optional_args: new RnnOptionalArgs() { Constants = constants }); return (output, new_states.Single); @@ -395,12 +394,12 @@ public override Tensors Apply(Tensors inputs, Tensors initial_states = null, boo { if (_num_constants != 0) { - initial_state = new Tensors(inputs.Skip(1)); + initial_state = new Tensors(inputs.Skip(1).ToArray()); } else { - initial_state = new Tensors(inputs.Skip(1).SkipLast(_num_constants)); - constants = new Tensors(inputs.TakeLast(_num_constants)); + initial_state = new Tensors(inputs.Skip(1).SkipLast(_num_constants).ToArray()); + constants = new Tensors(inputs.TakeLast(_num_constants).ToArray()); } if (len(initial_state) == 0) initial_state = null; @@ -558,36 +557,14 @@ public Tensors __call__(Tensors inputs, Tensor state = null, Tensor training = n protected Tensors get_initial_state(Tensors inputs) { - var get_initial_state_fn = _cell.GetType().GetMethod("get_initial_state"); - var input = inputs[0]; - var input_shape = inputs.shape; + var input_shape = array_ops.shape(inputs); var batch_size = _args.TimeMajor ? input_shape[1] : input_shape[0]; var dtype = input.dtype; - Tensors init_state = new Tensors(); - - if(get_initial_state_fn != null) - { - init_state = (Tensors)get_initial_state_fn.Invoke(_cell, new object[] { inputs, batch_size, dtype }); - - } - //if (_cell is RnnCellBase rnn_base_cell) - //{ - // init_state = rnn_base_cell.GetInitialState(null, batch_size, dtype); - //} - else - { - init_state = RnnUtils.generate_zero_filled_state(batch_size, _cell.StateSize, dtype); - } + Tensors init_state = _cell.GetInitialState(null, batch_size, dtype); return init_state; } - - // Check whether the state_size contains multiple states. 
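// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: what the zero-filled initial
// state produced by RnnUtils.generate_zero_filled_state_for_cell amounts to --
// one all-zero buffer per state entry, shaped [batch, state_dim]. Plain arrays
// stand in for tensors; ZeroStateSketch and its members are invented names,
// not TensorFlow.NET APIs.
static class ZeroStateSketch
{
    // stateSizes lists the width of each state slot the cell declares,
    // e.g. { 4 } for a simple cell with 4 units.
    public static float[][,] GenerateZeroFilledState(int batchSize, int[] stateSizes)
    {
        var states = new float[stateSizes.Length][,];
        for (int i = 0; i < stateSizes.Length; i++)
            states[i] = new float[batchSize, stateSizes[i]]; // new arrays are zero-initialized
        return states;
    }
}
// ---------------------------------------------------------------------------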
- public static bool is_multiple_state(GeneralizedTensorShape state_size) - { - return state_size.Shapes.Length > 1; - } } } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RnnCellBase.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RnnCellBase.cs deleted file mode 100644 index 751312e5d..000000000 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RnnCellBase.cs +++ /dev/null @@ -1,24 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; -using Tensorflow.Common.Types; -using Tensorflow.Keras.ArgsDefinition; -using Tensorflow.Keras.ArgsDefinition.Rnn; -using Tensorflow.Keras.Engine; -using Tensorflow.Keras.Utils; - -namespace Tensorflow.Keras.Layers.Rnn -{ - public abstract class RnnCellBase: Layer, IRnnCell - { - public RnnCellBase(LayerArgs args) : base(args) { } - public abstract GeneralizedTensorShape StateSize { get; } - public abstract GeneralizedTensorShape OutputSize { get; } - public abstract bool IsTFRnnCell { get; } - public abstract bool SupportOptionalArgs { get; } - public virtual Tensors GetInitialState(Tensors inputs, long batch_size, TF_DataType dtype) - { - return RnnUtils.generate_zero_filled_state_for_cell(this, inputs, batch_size, dtype); - } - } -} diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs index 39610ff52..3b4b9419e 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs @@ -7,6 +7,7 @@ using Tensorflow.Common.Types; using Tensorflow.Common.Extensions; using Tensorflow.Keras.Utils; +using Tensorflow.Graphs; namespace Tensorflow.Keras.Layers.Rnn { @@ -28,7 +29,6 @@ public class SimpleRNNCell : DropoutRNNCellMixin public override GeneralizedTensorShape StateSize => _state_size; public override GeneralizedTensorShape OutputSize => _output_size; - public override bool IsTFRnnCell => true; public override bool SupportOptionalArgs => false; public SimpleRNNCell(SimpleRNNCellArgs args) : base(args) @@ -98,7 +98,6 @@ protected override Tensors Call(Tensors inputs, Tensors states = null, bool? tra { prev_output = math_ops.multiply(prev_output, rec_dp_mask); } - var tmp = _recurrent_kernel.AsTensor(); Tensor output = h + math_ops.matmul(prev_output, _recurrent_kernel.AsTensor()); if (_args.Activation != null) @@ -117,9 +116,9 @@ protected override Tensors Call(Tensors inputs, Tensors states = null, bool? tra } } - public Tensors get_initial_state(Tensors inputs = null, long? batch_size = null, TF_DataType? 
dtype = null) + public Tensors get_initial_state(Tensors inputs = null, Tensor batch_size = null, TF_DataType dtype = TF_DataType.DtInvalid) { - return RnnUtils.generate_zero_filled_state_for_cell(this, inputs, batch_size.Value, dtype.Value); + return RnnUtils.generate_zero_filled_state_for_cell(this, inputs, batch_size, dtype); } } } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs index 56634853d..fb74d6d29 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs @@ -15,7 +15,7 @@ namespace Tensorflow.Keras.Layers.Rnn public class StackedRNNCells : Layer, IRnnCell { public IList Cells { get; set; } - public bool reverse_state_order; + public bool _reverse_state_order; public StackedRNNCells(StackedRNNCellsArgs args) : base(args) { @@ -23,22 +23,11 @@ public StackedRNNCells(StackedRNNCellsArgs args) : base(args) { args.Kwargs = new Dictionary(); } - foreach (var cell in args.Cells) - { - //Type type = cell.GetType(); - //var CallMethodInfo = type.GetMethod("Call"); - //if (CallMethodInfo == null) - //{ - // throw new ValueError( - // "All cells must have a `Call` method. " + - // $"Received cell without a `Call` method: {cell}"); - //} - } Cells = args.Cells; - reverse_state_order = (bool)args.Kwargs.Get("reverse_state_order", false); + _reverse_state_order = (bool)args.Kwargs.Get("reverse_state_order", false); - if (reverse_state_order) + if (_reverse_state_order) { throw new WarningException("reverse_state_order=True in StackedRNNCells will soon " + "be deprecated. Please update the code to work with the " + @@ -47,49 +36,37 @@ public StackedRNNCells(StackedRNNCellsArgs args) : base(args) } } + public bool SupportOptionalArgs => false; + public GeneralizedTensorShape StateSize { get { - GeneralizedTensorShape state_size = new GeneralizedTensorShape(1, Cells.Count); - if (reverse_state_order && Cells.Count > 0) + if (_reverse_state_order) { - var idxAndCell = Cells.Reverse().Select((cell, idx) => (idx, cell)); - foreach (var cell in idxAndCell) - { - state_size.Shapes[cell.idx] = cell.cell.StateSize.Shapes.First(); - } + var state_sizes = Cells.Reverse().Select(cell => cell.StateSize); + return new GeneralizedTensorShape(new Nest(state_sizes.Select(s => new Nest(s)))); } else { - //foreach (var cell in Cells) - //{ - // state_size.Shapes.add(cell.StateSize.Shapes.First()); - - //} - var idxAndCell = Cells.Select((cell, idx) => (idx, cell)); - foreach (var cell in idxAndCell) - { - state_size.Shapes[cell.idx] = cell.cell.StateSize.Shapes.First(); - } + var state_sizes = Cells.Select(cell => cell.StateSize); + return new GeneralizedTensorShape(new Nest(state_sizes.Select(s => new Nest(s)))); } - return state_size; } } - public object output_size + public GeneralizedTensorShape OutputSize { get { - var lastCell = Cells.LastOrDefault(); - if (lastCell.OutputSize.ToSingleShape() != -1) + var lastCell = Cells.Last(); + if(lastCell.OutputSize is not null) { return lastCell.OutputSize; } - else if (RNN.is_multiple_state(lastCell.StateSize)) + else if (RnnUtils.is_multiple_state(lastCell.StateSize)) { return lastCell.StateSize.First(); - //throw new NotImplementedException(""); } else { @@ -98,79 +75,65 @@ public object output_size } } - public Tensors get_initial_state(Tensors inputs = null, long? batch_size = null, TF_DataType? 
dtype = null) + public Tensors GetInitialState(Tensors inputs = null, Tensor batch_size = null, TF_DataType dtype = TF_DataType.DtInvalid) { - var cells = reverse_state_order ? Cells.Reverse() : Cells; - Tensors initial_states = new Tensors(); + var cells = _reverse_state_order ? Cells.Reverse() : Cells; + List initial_states = new List(); foreach (var cell in cells) { - var get_initial_state_fn = cell.GetType().GetMethod("get_initial_state"); - if (get_initial_state_fn != null) - { - var result = (Tensors)get_initial_state_fn.Invoke(cell, new object[] { inputs, batch_size, dtype }); - initial_states.Add(result); - } - else - { - initial_states.Add(RnnUtils.generate_zero_filled_state_for_cell(cell, inputs, batch_size.Value, dtype.Value)); - } + initial_states.Add(cell.GetInitialState(inputs, batch_size, dtype)); } - return initial_states; + return new Tensors(initial_states); } - protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) + protected override Tensors Call(Tensors inputs, Tensors states = null, bool? training = null, IOptionalArgs? optional_args = null) { // Recover per-cell states. - var state_size = reverse_state_order ? StateSize.Reverse() : StateSize; - var nested_states = reverse_state_order ? state.Flatten().Reverse() : state.Flatten(); + var state_size = _reverse_state_order ? new GeneralizedTensorShape(StateSize.Reverse()) : StateSize; + var nested_states = Nest.PackSequenceAs(state_size, Nest.Flatten(states).ToArray()); - - var new_nest_states = new Tensors(); + var new_nest_states = Nest.Empty; // Call the cells in order and store the returned states. - foreach (var (cell, states) in zip(Cells, nested_states)) + foreach (var (cell, internal_states) in zip(Cells, nested_states)) { - // states = states if tf.nest.is_nested(states) else [states] - var type = cell.GetType(); - bool IsTFRnnCell = type.GetProperty("IsTFRnnCell") != null; - state = len(state) == 1 && IsTFRnnCell ? state.FirstOrDefault() : state; - RnnOptionalArgs? rnn_optional_args = optional_args as RnnOptionalArgs; Tensors? constants = rnn_optional_args?.Constants; Tensors new_states; - (inputs, new_states) = cell.Apply(inputs, states, optional_args: new RnnOptionalArgs() { Constants = constants }); + (inputs, new_states) = cell.Apply(inputs, internal_states, optional_args: new RnnOptionalArgs() { Constants = constants }); - new_nest_states.Add(new_states); + new_nest_states = new_nest_states.MergeWith(new_states); } - new_nest_states = reverse_state_order ? 
new_nest_states.Reverse().ToArray() : new_nest_states.ToArray(); - return new Nest(new List> { - new Nest(new List> { new Nest(inputs.Single()) }), new Nest(new_nest_states) }) - .ToTensors(); + return Tensors.FromNest((inputs, Nest.PackSequenceAs(state_size, Nest.Flatten(new_nest_states).ToArray()))); } - - - public void build() + public override void build(KerasShapesWrapper input_shape) { - built = true; - // @tf_utils.shape_type_conversion - // def build(self, input_shape) : - // if isinstance(input_shape, list) : - // input_shape = input_shape[0] - // for cell in self.cells: - // if isinstance(cell, Layer) and not cell.built: - // with K.name_scope(cell.name): - // cell.build(input_shape) - // cell.built = True - // if getattr(cell, 'output_size', None) is not None: - // output_dim = cell.output_size - // elif _is_multiple_state(cell.state_size) : - // output_dim = cell.state_size[0] - // else: - // output_dim = cell.state_size - // input_shape = tuple([input_shape[0]] + - // tensor_shape.TensorShape(output_dim).as_list()) - // self.built = True + var shape = input_shape.ToSingleShape(); + foreach(var cell in Cells) + { + if(cell is Layer layer && !layer.Built) + { + // ignored the name scope. + layer.build(shape); + layer.Built = true; + } + GeneralizedTensorShape output_dim; + if(cell.OutputSize is not null) + { + output_dim = cell.OutputSize; + } + else if (RnnUtils.is_multiple_state(cell.StateSize)) + { + output_dim = cell.StateSize.First(); + } + else + { + output_dim = cell.StateSize; + } + shape = new Shape(new long[] { shape.dims[0] }.Concat(output_dim.ToSingleShape().dims).ToArray()); + } + this.Built = true; } public override IKerasConfig get_config() @@ -198,14 +161,5 @@ public void from_config() // deserialize_layer(cell_config, custom_objects = custom_objects)) // return cls(cells, **config) } - - public (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? training = null) - { - throw new NotImplementedException(); - } - - public GeneralizedTensorShape OutputSize => throw new NotImplementedException(); - public bool IsTFRnnCell => true; - public bool SupportOptionalArgs => throw new NotImplementedException(); } } diff --git a/src/TensorFlowNET.Keras/Utils/RnnUtils.cs b/src/TensorFlowNET.Keras/Utils/RnnUtils.cs index 3109eb77b..7ff3f9fb8 100644 --- a/src/TensorFlowNET.Keras/Utils/RnnUtils.cs +++ b/src/TensorFlowNET.Keras/Utils/RnnUtils.cs @@ -10,20 +10,21 @@ namespace Tensorflow.Keras.Utils { internal static class RnnUtils { - internal static Tensors generate_zero_filled_state(long batch_size_tensor, GeneralizedTensorShape state_size, TF_DataType dtype) + internal static Tensors generate_zero_filled_state(Tensor batch_size_tensor, GeneralizedTensorShape state_size, TF_DataType dtype) { Func create_zeros; create_zeros = (GeneralizedTensorShape unnested_state_size) => { var flat_dims = unnested_state_size.ToSingleShape().dims; - var init_state_size = new long[] { batch_size_tensor }.Concat(flat_dims).ToArray(); - return array_ops.zeros(new Shape(init_state_size), dtype: dtype); + var init_state_size = new Tensor[] { batch_size_tensor }. + Concat(flat_dims.Select(x => tf.constant(x, dtypes.int32))).ToArray(); + return array_ops.zeros(init_state_size, dtype: dtype); }; // TODO(Rinne): map structure with nested tensors. 
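// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: the shape-threading idea behind
// the StackedRNNCells.build rewrite above. Each cell is built against the
// width produced by the previous cell. Plain ints stand in for shapes, and
// StackedBuildSketch/CellStub are invented names, not TensorFlow.NET APIs.
using System.Collections.Generic;

static class StackedBuildSketch
{
    public sealed class CellStub
    {
        public int OutputSize;             // width this cell emits
        public int BuiltForInputSize = -1; // width this cell was built against
    }

    public static void Build(IList<CellStub> cells, int inputSize)
    {
        var width = inputSize;
        foreach (var cell in cells)
        {
            cell.BuiltForInputSize = width; // "build" the cell for the incoming width
            width = cell.OutputSize;        // the next cell sees this cell's output
        }
    }
}
// ---------------------------------------------------------------------------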
- if(state_size.Shapes.Length > 1) + if(state_size.TotalNestedCount > 1) { - return new Tensors(state_size.ToShapeArray().Select(s => create_zeros(new GeneralizedTensorShape(s)))); + return new Tensors(state_size.Flatten().Select(s => create_zeros(new GeneralizedTensorShape(s))).ToArray()); } else { @@ -32,11 +33,11 @@ internal static Tensors generate_zero_filled_state(long batch_size_tensor, Gener } - internal static Tensors generate_zero_filled_state_for_cell(IRnnCell cell, Tensors inputs, long batch_size, TF_DataType dtype) + internal static Tensors generate_zero_filled_state_for_cell(IRnnCell cell, Tensors inputs, Tensor batch_size, TF_DataType dtype) { - if (inputs != null) + if (inputs is not null) { - batch_size = inputs.shape[0]; + batch_size = array_ops.shape(inputs)[0]; dtype = inputs.dtype; } return generate_zero_filled_state(batch_size, cell.StateSize, dtype); @@ -77,17 +78,27 @@ internal static (Tensors, Tensors, Tensors) standardize_args(Tensors inputs, Ten Debug.Assert(initial_state is null && constants is null); if(num_constants > 0) { - constants = inputs.TakeLast(num_constants).ToTensors(); - inputs = inputs.SkipLast(num_constants).ToTensors(); + constants = inputs.TakeLast(num_constants).ToArray().ToTensors(); + inputs = inputs.SkipLast(num_constants).ToArray().ToTensors(); } if(inputs.Length > 1) { - initial_state = inputs.Skip(1).ToTensors(); - inputs = inputs.Take(1).ToTensors(); + initial_state = inputs.Skip(1).ToArray().ToTensors(); + inputs = inputs.Take(1).ToArray().ToTensors(); } } return (inputs, initial_state, constants); } + + /// + /// Check whether the state_size contains multiple states. + /// + /// + /// + public static bool is_multiple_state(GeneralizedTensorShape state_size) + { + return state_size.TotalNestedCount > 1; + } } } diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs index 6d7182e09..23dc1d44d 100644 --- a/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs +++ b/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs @@ -28,8 +28,8 @@ public void WhileLoopTwoInputsEagerMode() var i = tf.constant(2); var j = tf.constant(3); - Func c = (x) => tf.less(x[0] + x[1], 10); - Func b = (x) => new[] { tf.add(x[0], 1), tf.add(x[1], 1) }; + Func c = (x) => tf.less(x[0] + x[1], 10); + Func b = (x) => new[] { tf.add(x[0], 1), tf.add(x[1], 1) }; var r = tf.while_loop(c, b, new[] { i, j }); Assert.AreEqual(5, (int)r[0]); Assert.AreEqual(6, (int)r[1]); diff --git a/tools/Tensorflow.CodeGen/FunctionGenerator.cs b/tools/Tensorflow.CodeGen/FunctionGenerator.cs index 93f9ea4e9..186e6a27b 100644 --- a/tools/Tensorflow.CodeGen/FunctionGenerator.cs +++ b/tools/Tensorflow.CodeGen/FunctionGenerator.cs @@ -21,7 +21,8 @@ public void AppendFunction(OpDef op, StringBuilder sb) { sb.Append("Operation "); } - else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr) + && string.IsNullOrEmpty(op.OutputArg[0].TypeListAttr)) { sb.Append("Tensor "); } @@ -70,7 +71,8 @@ public void AppendFunction(OpDef op, StringBuilder sb) { sb.AppendLine("return null;"); } - else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr) + && string.IsNullOrEmpty(op.OutputArg[0].TypeListAttr)) { sb.AppendLine("return _fast_path_result[0];"); } @@ -149,7 +151,8 @@ public void 
AppendFunction(OpDef op, StringBuilder sb) { sb.AppendLine("return _op;"); } - else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr) + && string.IsNullOrEmpty(op.OutputArg[0].TypeListAttr)) { sb.AppendLine("return _result[0];"); } @@ -174,7 +177,7 @@ public void AppendArgs(OpDef op, StringBuilder sb) { argName = $"{argName}_"; } - if (!string.IsNullOrEmpty(arg.NumberAttr)) + if (!string.IsNullOrEmpty(arg.NumberAttr) || !string.IsNullOrEmpty(arg.TypeListAttr)) { sb.Append($"Tensors {argName}, "); } @@ -273,7 +276,8 @@ public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) { sb.Append("Operation "); } - else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr) + && string.IsNullOrEmpty(op.OutputArg[0].TypeListAttr)) { sb.Append("Tensor "); } @@ -366,6 +370,13 @@ public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) sb.Append($"\"{attr.Name}\", {attrRealName}, "); } } + else if(attr.Type == "list(type)") + { + if (op.InputArg.Any(x => x.TypeListAttr == attr.Name)) + { + continue; + } + } else if(attr.Type == "int" && op.InputArg.Any(x => x.NumberAttr == attr.Name)) { bool found = false; @@ -408,7 +419,8 @@ public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) { sb.AppendLine("return null;"); } - else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr) + && string.IsNullOrEmpty(op.OutputArg[0].TypeListAttr)) { sb.AppendLine("return _result[0];"); } diff --git a/tools/Tensorflow.CodeGen/Program.cs b/tools/Tensorflow.CodeGen/Program.cs index f9d44ce83..cea52e0b4 100644 --- a/tools/Tensorflow.CodeGen/Program.cs +++ b/tools/Tensorflow.CodeGen/Program.cs @@ -5,7 +5,7 @@ using System.Xml.Linq; using Tensorflow.CodeGen; -GenOpsWriter writer = new(@"D:\development\tf.net\gen_ops", +GenOpsWriter writer = new(@"D:\development\tf.net\gen_ops_v2", @"D:\Apps\miniconda3\envs\tf2.11\Lib\site-packages\tensorflow\python\ops", @"D:\development\tf.net\tensorflow-2.11.0\tensorflow\core\api_def\base_api", @"D:\development\tf.net\tensorflow-2.11.0\tensorflow\core\ops\ops.pbtxt"); diff --git a/tools/Tensorflow.CodeGen/Utils.cs b/tools/Tensorflow.CodeGen/Utils.cs index d3f30d9f2..19de6c0e0 100644 --- a/tools/Tensorflow.CodeGen/Utils.cs +++ b/tools/Tensorflow.CodeGen/Utils.cs @@ -155,6 +155,10 @@ public static OpList ReadAllOpDefs(string path) } else if (attr.Type == "list(type)") { + if(op.InputArg.Any(x => x.TypeListAttr == attr.Name)) + { + continue; + } if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) { List values = new(); @@ -231,11 +235,11 @@ public static OpList ReadAllOpDefs(string path) } else if (attr.Type == "func") { - res.Add((attr.Name, "Func", "NOVALUE")); + res.Add((attr.Name, "object", "NOVALUE")); } else if (attr.Type == "list(func)") { - res.Add((attr.Name, "Func[]", "NOVALUE")); + res.Add((attr.Name, "object[]", "NOVALUE")); } else if (attr.Type == "tensor") { From 07ea65683362cc2a633e9de0a7e0b550794d2474 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Fri, 16 Jun 2023 16:15:01 +0800 Subject: [PATCH 037/182] fix: error when training SimpleRNN. 
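
One part of this fix touches `Operation.get_attr<T>`: value-typed requests now go through `Convert.ChangeType`, presumably because the attribute comes back boxed as a different numeric type (a boxed `long` cannot be unboxed directly to `int`). A minimal stand-alone illustration of that behaviour, using plain .NET only — the names below are invented for the example and are not TensorFlow.NET APIs:

    using System;

    class BoxedAttrSketch
    {
        static T Coerce<T>(object boxedAttr)
        {
            // (T)boxedAttr throws InvalidCastException when e.g. the box holds
            // a long and T is int; Convert.ChangeType performs the conversion.
            return typeof(T).IsValueType
                ? (T)Convert.ChangeType(boxedAttr, typeof(T))
                : (T)boxedAttr;
        }

        static void Main()
        {
            object boxed = 3L;                     // attribute held as a boxed long
            Console.WriteLine(Coerce<int>(boxed)); // prints 3 instead of throwing
        }
    }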
--- .../Exceptions/NotOkStatusException.cs | 19 +++++++ .../Operations/Operation.cs | 11 +++- .../Operations/gen_math_ops.cs | 3 +- src/TensorFlowNET.Core/Status/Status.cs | 3 +- src/TensorFlowNET.Keras/IsExternalInit.cs | 4 ++ src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs | 54 ++++++++++++------- .../Layers/Rnn/SimpleRNN.cs | 14 ----- .../Layers/Rnn.Test.cs | 5 ++ 8 files changed, 78 insertions(+), 35 deletions(-) create mode 100644 src/TensorFlowNET.Core/Exceptions/NotOkStatusException.cs create mode 100644 src/TensorFlowNET.Keras/IsExternalInit.cs diff --git a/src/TensorFlowNET.Core/Exceptions/NotOkStatusException.cs b/src/TensorFlowNET.Core/Exceptions/NotOkStatusException.cs new file mode 100644 index 000000000..c283c1a45 --- /dev/null +++ b/src/TensorFlowNET.Core/Exceptions/NotOkStatusException.cs @@ -0,0 +1,19 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Exceptions +{ + public class NotOkStatusException : TensorflowException + { + public NotOkStatusException() : base() + { + + } + + public NotOkStatusException(string message) : base(message) + { + + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index 5e689c655..d31b26d4a 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -186,7 +186,16 @@ public void run(FeedItem[] feed_dict = null, Session session = null) } public virtual T get_attr(string name) - => (T)get_attr(name); + { + if (typeof(T).IsValueType) + { + return (T)Convert.ChangeType(get_attr(name), typeof(T)); + } + else + { + return (T)get_attr(name); + } + } internal unsafe TF_DataType _get_attr_type(string name) { diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs index 3456d9b3d..6eb7a4116 100644 --- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs @@ -4633,8 +4633,9 @@ public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatMul", name) { args = new object[] { a, b }, attrs = new Dictionary() { ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b } }); return _fast_path_result[0]; } - catch (Exception) + catch (Exception ex) { + Console.WriteLine(); } try { diff --git a/src/TensorFlowNET.Core/Status/Status.cs b/src/TensorFlowNET.Core/Status/Status.cs index a890c2aef..12b6fba2b 100644 --- a/src/TensorFlowNET.Core/Status/Status.cs +++ b/src/TensorFlowNET.Core/Status/Status.cs @@ -17,6 +17,7 @@ limitations under the License. 
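// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: the status-to-exception mapping
// that Status.Check follows once its default case throws the new
// NotOkStatusException instead of the generic TensorflowException. The enum,
// class and method names below are invented stand-ins, not TensorFlow.NET or
// libtensorflow APIs.
using System;

static class StatusCheckSketch
{
    public enum Code { Ok, InvalidArgument, Unknown }

    public sealed class NotOkSketchException : Exception
    {
        public NotOkSketchException(string message) : base(message) { }
    }

    public static void Check(Code code, string message)
    {
        switch (code)
        {
            case Code.Ok:
                return;                               // nothing to report
            case Code.InvalidArgument:
                throw new ArgumentException(message); // well-known codes keep specific types
            default:
                // Unrecognized codes surface as a dedicated "not OK" exception,
                // which lets callers catch this failure category precisely.
                throw new NotOkSketchException(message);
        }
    }
}
// ---------------------------------------------------------------------------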
using System; using System.Diagnostics; using System.Runtime.CompilerServices; +using Tensorflow.Exceptions; using Tensorflow.Util; using static Tensorflow.c_api; @@ -88,7 +89,7 @@ public void Check(bool throwException = false) case TF_Code.TF_INVALID_ARGUMENT: throw new InvalidArgumentError(message); default: - throw new TensorflowException(message); + throw new NotOkStatusException(message); } } } diff --git a/src/TensorFlowNET.Keras/IsExternalInit.cs b/src/TensorFlowNET.Keras/IsExternalInit.cs new file mode 100644 index 000000000..11f062fa8 --- /dev/null +++ b/src/TensorFlowNET.Keras/IsExternalInit.cs @@ -0,0 +1,4 @@ +namespace System.Runtime.CompilerServices +{ + internal static class IsExternalInit { } +} diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs index 77f7d927f..f99bc23aa 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs @@ -11,6 +11,7 @@ using System.Linq.Expressions; using Tensorflow.Keras.Utils; using Tensorflow.Common.Types; +using System.Runtime.CompilerServices; // from tensorflow.python.distribute import distribution_strategy_context as ds_context; namespace Tensorflow.Keras.Layers.Rnn @@ -30,7 +31,19 @@ public class RNN : RnnBase private int _num_constants; protected IVariableV1 _kernel; protected IVariableV1 _bias; - protected IRnnCell _cell; + private IRnnCell _cell; + protected IRnnCell Cell + { + get + { + return _cell; + } + init + { + _cell = value; + _self_tracked_trackables.Add(_cell); + } + } public RNN(RNNArgs args) : base(PreConstruct(args)) { @@ -40,14 +53,14 @@ public RNN(RNNArgs args) : base(PreConstruct(args)) // if is StackedRnncell if (args.Cells != null) { - _cell = new StackedRNNCells(new StackedRNNCellsArgs + Cell = new StackedRNNCells(new StackedRNNCellsArgs { Cells = args.Cells }); } else { - _cell = args.Cell; + Cell = args.Cell; } // get input_shape @@ -65,7 +78,7 @@ public Tensors States if (_states == null) { // CHECK(Rinne): check if this is correct. - var nested = _cell.StateSize.MapStructure(x => null); + var nested = Cell.StateSize.MapStructure(x => null); _states = nested.AsNest().ToTensors(); } return _states; @@ -83,7 +96,7 @@ private OneOf> compute_output_shape(Shape input_shape) } // state_size is a array of ints or a positive integer - var state_size = _cell.StateSize.ToSingleShape(); + var state_size = Cell.StateSize.ToSingleShape(); // TODO(wanglongzhi2001),flat_output_size应该是什么类型的,Shape还是Tensor Func _get_output_shape; @@ -110,12 +123,12 @@ private OneOf> compute_output_shape(Shape input_shape) return output_shape; }; - Type type = _cell.GetType(); + Type type = Cell.GetType(); PropertyInfo output_size_info = type.GetProperty("output_size"); Shape output_shape; if (output_size_info != null) { - output_shape = nest.map_structure(_get_output_shape, _cell.OutputSize.ToSingleShape()); + output_shape = nest.map_structure(_get_output_shape, Cell.OutputSize.ToSingleShape()); // TODO(wanglongzhi2001),output_shape应该简单的就是一个元组还是一个Shape类型 output_shape = (output_shape.Length == 1 ? (int)output_shape[0] : output_shape); } @@ -171,7 +184,9 @@ private Tensors compute_mask(Tensors inputs, Tensors mask) public override void build(KerasShapesWrapper input_shape) { - object get_input_spec(Shape shape) + input_shape = new KerasShapesWrapper(input_shape.Shapes[0]); + + InputSpec get_input_spec(Shape shape) { var input_spec_shape = shape.as_int_list(); @@ -213,10 +228,13 @@ object get_state_spec(Shape shape) // numpy inputs. 
- if (!_cell.Built) + if (Cell is Layer layer && !layer.Built) { - _cell.build(input_shape); + layer.build(input_shape); + layer.Built = true; } + + this.built = true; } /// @@ -247,10 +265,10 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo (inputs, initial_state, constants) = _process_inputs(inputs, initial_state, constants); - _maybe_reset_cell_dropout_mask(_cell); - if (_cell is StackedRNNCells) + _maybe_reset_cell_dropout_mask(Cell); + if (Cell is StackedRNNCells) { - var stack_cell = _cell as StackedRNNCells; + var stack_cell = Cell as StackedRNNCells; foreach (IRnnCell cell in stack_cell.Cells) { _maybe_reset_cell_dropout_mask(cell); @@ -300,10 +318,10 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo bool is_tf_rnn_cell = false; if (constants is not null) { - if (!_cell.SupportOptionalArgs) + if (!Cell.SupportOptionalArgs) { throw new ValueError( - $"RNN cell {_cell} does not support constants." + + $"RNN cell {Cell} does not support constants." + $"Received: constants={constants}"); } @@ -312,7 +330,7 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo constants = new Tensors(states.TakeLast(_num_constants).ToArray()); states = new Tensors(states.SkipLast(_num_constants).ToArray()); states = len(states) == 1 && is_tf_rnn_cell ? new Tensors(states[0]) : states; - var (output, new_states) = _cell.Apply(inputs, states, optional_args: new RnnOptionalArgs() { Constants = constants }); + var (output, new_states) = Cell.Apply(inputs, states, optional_args: new RnnOptionalArgs() { Constants = constants }); return (output, new_states.Single); }; } @@ -321,7 +339,7 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo step = (inputs, states) => { states = len(states) == 1 && is_tf_rnn_cell ? new Tensors(states.First()) : states; - var (output, new_states) = _cell.Apply(inputs, states); + var (output, new_states) = Cell.Apply(inputs, states); return (output, new_states); }; } @@ -562,7 +580,7 @@ protected Tensors get_initial_state(Tensors inputs) var batch_size = _args.TimeMajor ? 
input_shape[1] : input_shape[0]; var dtype = input.dtype; - Tensors init_state = _cell.GetInitialState(null, batch_size, dtype); + Tensors init_state = Cell.GetInitialState(null, batch_size, dtype); return init_state; } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs index 22d0e2770..551c20cdd 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs @@ -32,19 +32,5 @@ private static SimpleRNNArgs CreateCellForArgs(SimpleRNNArgs args) }); return args; } - - public override void build(KerasShapesWrapper input_shape) - { - var single_shape = input_shape.ToSingleShape(); - var input_dim = single_shape[-1]; - _buildInputShape = input_shape; - - _kernel = add_weight("kernel", (single_shape[-1], args.Units), - initializer: args.KernelInitializer - //regularizer = self.kernel_regularizer, - //constraint = self.kernel_constraint, - //caching_device = default_caching_device, - ); - } } } \ No newline at end of file diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs index 28a16ad4e..fcb9ad1d6 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs @@ -77,6 +77,11 @@ public void SimpleRNN() var output = keras.layers.Dense(10).Apply(x); var model = keras.Model(inputs, output); model.summary(); + + model.compile(keras.optimizers.Adam(), keras.losses.SparseCategoricalCrossentropy()); + var datax = np.ones((16, 10, 8), dtype: dtypes.float32); + var datay = np.ones((16)); + model.fit(datax, datay, epochs: 20); } [TestMethod] public void RNNForSimpleRNNCell() From 5bfe0982e93cbc3ee3e7e1afbbd66e3d445f5bdd Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Fri, 16 Jun 2023 16:15:27 +0800 Subject: [PATCH 038/182] feat: add exception catch to code generator. 
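The op wrappers emitted by the code generator swallow every exception raised on the eager fast path so that execution can fall back to the slower paths below. With the NotOkStatusException introduced earlier in this series, that would also hide genuine runtime status errors, so the generator now emits a rethrowing catch clause ahead of the catch-all. The emitted pattern looks roughly like the sketch below; SomeOp and the single input argument are placeholders, and the real bodies are produced by FunctionGenerator and appear in the regenerated gen_*_ops.cs files later in this series:

    try
    {
        var _fast_path_result = tf.Runner.TFE_FastPathExecute(
            new FastPathOpExecInfo(_ctx, "SomeOp", name) { args = new object[] { input } });
        return _fast_path_result[0];
    }
    catch (NotOkStatusException ex)
    {
        throw ex;            // real runtime failures are surfaced to the caller
    }
    catch (Exception)
    {
        // anything else is ignored so execution can continue on the fallback path below
    }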
--- tools/Tensorflow.CodeGen/FunctionGenerator.cs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/Tensorflow.CodeGen/FunctionGenerator.cs b/tools/Tensorflow.CodeGen/FunctionGenerator.cs index 186e6a27b..bb07dddf5 100644 --- a/tools/Tensorflow.CodeGen/FunctionGenerator.cs +++ b/tools/Tensorflow.CodeGen/FunctionGenerator.cs @@ -83,6 +83,10 @@ public void AppendFunction(OpDef op, StringBuilder sb) sb.AppendLine("}"); // try + sb.Append("catch(NotOkStatusException ex)\n{\n"); + sb.AppendLine("throw ex;"); + sb.AppendLine("}"); // catch + sb.Append("catch(Exception)\n{\n"); sb.AppendLine("}"); // catch From df7d700fb162ebe85ff1ae4ca831c7f9e9b1204a Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Wed, 14 Jun 2023 20:34:25 +0800 Subject: [PATCH 039/182] Add new feature: add LSTMCell and test --- .../Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs | 32 ++- .../ArgsDefinition/Rnn/SimpleRNNCellArgs.cs | 4 +- .../Keras/Layers/ILayersApi.cs | 12 + .../Operations/Initializers/Orthogonal.cs | 3 +- .../Operations/array_ops.cs | 43 ++++ src/TensorFlowNET.Keras/Layers/LayersApi.cs | 28 +++ .../Layers/Rnn/DropoutRNNCellMixin.cs | 4 +- .../Layers/Rnn/LSTMCell.cs | 232 +++++++++++++++++- .../Layers/Rnn/SimpleRNNCell.cs | 4 +- .../Layers/Rnn.Test.cs | 50 ++-- 10 files changed, 376 insertions(+), 36 deletions(-) diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs index 594c99bb0..786236e4d 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs @@ -1,7 +1,35 @@ -namespace Tensorflow.Keras.ArgsDefinition.Rnn +using Newtonsoft.Json; +using static Tensorflow.Binding; + +namespace Tensorflow.Keras.ArgsDefinition.Rnn { // TODO: complete the implementation - public class LSTMCellArgs : LayerArgs + public class LSTMCellArgs : AutoSerializeLayerArgs { + [JsonProperty("units")] + public int Units { get; set; } + // TODO(Rinne): lack of initialized value of Activation. Merging keras + // into tf.net could resolve it. 
+ [JsonProperty("activation")] + public Activation Activation { get; set; } + [JsonProperty("recurrent_activation")] + public Activation RecurrentActivation { get; set; } + [JsonProperty("use_bias")] + public bool UseBias { get; set; } = true; + [JsonProperty("dropout")] + public float Dropout { get; set; } = .0f; + [JsonProperty("recurrent_dropout")] + public float RecurrentDropout { get; set; } = .0f; + [JsonProperty("kernel_initializer")] + public IInitializer KernelInitializer { get; set; } + [JsonProperty("recurrent_initializer")] + public IInitializer RecurrentInitializer { get; set; } + [JsonProperty("bias_initializer")] + public IInitializer BiasInitializer { get; set; } + [JsonProperty("unit_forget_bias")] + public bool UnitForgetBias { get; set; } = true; + [JsonProperty("implementation")] + public int Implementation { get; set; } = 2; + } } diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs index 1dfcbe9cf..d21d61905 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs @@ -1,7 +1,4 @@ using Newtonsoft.Json; -using System; -using System.Collections.Generic; -using System.Text; namespace Tensorflow.Keras.ArgsDefinition.Rnn { @@ -25,5 +22,6 @@ public class SimpleRNNCellArgs: AutoSerializeLayerArgs public IInitializer RecurrentInitializer { get; set; } [JsonProperty("bias_initializer")] public IInitializer BiasInitializer { get; set; } + } } diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs index 3b2238164..a19508d42 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs @@ -160,6 +160,18 @@ public ILayer LayerNormalization(Axis? axis, public ILayer Normalization(Shape? input_shape = null, int? axis = -1, float? mean = null, float? 
variance = null, bool invert = false); public ILayer LeakyReLU(float alpha = 0.3f); + public IRnnCell LSTMCell(int uints, + string activation = "tanh", + string recurrent_activation = "sigmoid", + bool use_bias = true, + string kernel_initializer = "glorot_uniform", + string recurrent_initializer = "orthogonal", + string bias_initializer = "zeros", + bool unit_forget_bias = true, + float dropout = 0f, + float recurrent_dropout = 0f, + int implementation = 2); + public ILayer LSTM(int units, Activation activation = null, Activation recurrent_activation = null, diff --git a/src/TensorFlowNET.Core/Operations/Initializers/Orthogonal.cs b/src/TensorFlowNET.Core/Operations/Initializers/Orthogonal.cs index 88673bb5e..ae8733740 100644 --- a/src/TensorFlowNET.Core/Operations/Initializers/Orthogonal.cs +++ b/src/TensorFlowNET.Core/Operations/Initializers/Orthogonal.cs @@ -58,8 +58,7 @@ private Tensor _generate_init_val(Shape shape, TF_DataType dtype) if (num_rows < num_cols) { - // q = tf.linalg.matrix_transpose(q); - throw new NotImplementedException(""); + q = array_ops.matrix_transpose(q); } return _gain * tf.reshape(q, shape); diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index ca9e5fae2..c4ec974b8 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -971,6 +971,49 @@ public static Tensor transpose(Tensor a, Tensor perm, string name = "transpose", }); } + /// + /// Transposes last two dimensions of tensor `a`. + /// For example: + /// python + /// x = tf.constant([[1, 2, 3], [4, 5, 6]]) + /// tf.matrix_transpose(x) # [[1, 4], + /// # [2, 5], + /// # [3, 6]] + /// + /// Matrix with two batch dimensions. + /// x.shape is [1, 2, 3, 4] + /// tf.linalg.matrix_transpose(x) is shape [1, 2, 4, 3] + /// + /// + /// + /// + /// + /// + public static Tensor matrix_transpose(Tensor a, string name = "matrix_transpose", bool conjugate = false) + { + return tf_with(ops.name_scope(name, "transpose", new { a }), scope => + { + var a_shape = a.shape; + var ndims = a.shape.ndim; + Axis perm; + if(ndims != 0) + { + if (ndims < 2) + { + throw new ValueError("Argument `a` should be a (batch) matrix with rank " + + $">= 2. 
Received `a` = {a} with shape: {a_shape}"); + } + perm = new Axis(Enumerable.Range(0, ndims - 2).Concat(new int[] { ndims - 1, ndims - 2 }).ToArray()); + } + else + { + var a_rank = a.rank; + perm = new Axis(Enumerable.Range(0, a_rank - 2).Concat(new int[] { a_rank - 1, a_rank - 2 }).ToArray()); + } + return transpose(a, perm:perm, conjugate:conjugate); + }); + } + public static Tensor[] split(Tensor value, Tensor size_splits, int axis, int num = -1, string name = "split") { diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index dd25122d5..66c3cdc1a 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -702,6 +702,7 @@ public IRnnCell SimpleRNNCell( UseBias = use_bias, KernelInitializer = GetInitializerByName(kernel_initializer), RecurrentInitializer = GetInitializerByName(recurrent_initializer), + BiasInitializer = GetInitializerByName(bias_initializer), Dropout = dropout, RecurrentDropout = recurrent_dropout }); @@ -786,6 +787,33 @@ public ILayer RNN( TimeMajor = time_major }); + + public IRnnCell LSTMCell(int uints, + string activation = "tanh", + string recurrent_activation = "sigmoid", + bool use_bias = true, + string kernel_initializer = "glorot_uniform", + string recurrent_initializer = "orthogonal", // TODO(Wanglongzhi2001),glorot_uniform has not been developed. + string bias_initializer = "zeros", + bool unit_forget_bias = true, + float dropout = 0f, + float recurrent_dropout = 0f, + int implementation = 2) + => new LSTMCell(new LSTMCellArgs + { + Units = uints, + Activation = keras.activations.GetActivationFromName(activation), + RecurrentActivation = keras.activations.GetActivationFromName(recurrent_activation), + UseBias = use_bias, + KernelInitializer = GetInitializerByName(kernel_initializer), + RecurrentInitializer = GetInitializerByName(recurrent_initializer), + BiasInitializer = GetInitializerByName(bias_initializer), + UnitForgetBias = unit_forget_bias, + Dropout = dropout, + RecurrentDropout = recurrent_dropout, + Implementation = implementation + }); + /// /// Long Short-Term Memory layer - Hochreiter 1997. /// diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs index d2669cccf..1cc36d34a 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs @@ -41,7 +41,7 @@ public void reset_recurrent_dropout_mask() } - public Tensors? get_dropout_maskcell_for_cell(Tensors input, bool training, int count = 1) + public Tensors? get_dropout_mask_for_cell(Tensors input, bool training, int count = 1) { if (dropout == 0f) return null; @@ -53,7 +53,7 @@ public void reset_recurrent_dropout_mask() } // Get the recurrent dropout mask for RNN cell. - public Tensors? get_recurrent_dropout_maskcell_for_cell(Tensors input, bool training, int count = 1) + public Tensors? 
get_recurrent_dropout_mask_for_cell(Tensors input, bool training, int count = 1) { if (dropout == 0f) return null; diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs index a622c91a9..94d98e130 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs @@ -1,16 +1,240 @@ -using Tensorflow.Keras.ArgsDefinition.Rnn; +using Serilog.Core; +using System.Diagnostics; +using Tensorflow.Common.Types; +using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; +using Tensorflow.Keras.Saving; +using Tensorflow.Keras.Utils; namespace Tensorflow.Keras.Layers.Rnn { - public class LSTMCell : Layer + /// + /// Cell class for the LSTM layer. + /// See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) + /// for details about the usage of RNN API. + /// This class processes one step within the whole time sequence input, whereas + /// `tf.keras.layer.LSTM` processes the whole sequence. + /// + public class LSTMCell : DropoutRNNCellMixin { - LSTMCellArgs args; + LSTMCellArgs _args; + IVariableV1 _kernel; + IVariableV1 _recurrent_kernel; + IInitializer _bias_initializer; + IVariableV1 _bias; + GeneralizedTensorShape _state_size; + GeneralizedTensorShape _output_size; + public override GeneralizedTensorShape StateSize => _state_size; + public override GeneralizedTensorShape OutputSize => _output_size; + + public override bool IsTFRnnCell => true; + + public override bool SupportOptionalArgs => false; public LSTMCell(LSTMCellArgs args) : base(args) { - this.args = args; + _args = args; + if (args.Units <= 0) + { + throw new ValueError( + $"units must be a positive integer, got {args.Units}"); + } + _args.Dropout = Math.Min(1f, Math.Max(0f, this._args.Dropout)); + _args.RecurrentDropout = Math.Min(1f, Math.Max(0f, this._args.RecurrentDropout)); + if (_args.RecurrentDropout != 0f && _args.Implementation != 1) + { + Debug.WriteLine("RNN `implementation=2` is not supported when `recurrent_dropout` is set." + + "Using `implementation=1`."); + _args.Implementation = 1; + } + + _state_size = new GeneralizedTensorShape(_args.Units, 2); + _output_size = new GeneralizedTensorShape(_args.Units); + + + } + + public override void build(KerasShapesWrapper input_shape) + { + var single_shape = input_shape.ToSingleShape(); + var input_dim = single_shape[-1]; + _kernel = add_weight("kernel", (input_dim, _args.Units * 4), + initializer: _args.KernelInitializer + ); + + _recurrent_kernel = add_weight("recurrent_kernel", (_args.Units, _args.Units * 4), + initializer: _args.RecurrentInitializer + ); + + if (_args.UseBias) + { + if (_args.UnitForgetBias) + { + Tensor bias_initializer() + { + return keras.backend.concatenate( + new Tensors( + _args.BiasInitializer.Apply(new InitializerArgs(shape: (_args.Units))), + tf.ones_initializer.Apply(new InitializerArgs(shape: (_args.Units))), + _args.BiasInitializer.Apply(new InitializerArgs(shape: (_args.Units)))), axis: 0); + } + } + else + { + _bias_initializer = _args.BiasInitializer; + } + _bias = add_weight("bias", (_args.Units * 4), + initializer: _args.BiasInitializer); + } + built = true; + } + protected override Tensors Call(Tensors inputs, Tensors states = null, bool? training = null, IOptionalArgs? 
optional_args = null) + { + var h_tm1 = states[0]; // previous memory state + var c_tm1 = states[1]; // previous carry state + + var dp_mask = get_dropout_mask_for_cell(inputs, training.Value, count: 4); + var rec_dp_mask = get_recurrent_dropout_mask_for_cell( + h_tm1, training.Value, count: 4); + + + Tensor c; + Tensor o; + if (_args.Implementation == 1) + { + Tensor inputs_i; + Tensor inputs_f; + Tensor inputs_c; + Tensor inputs_o; + if (0f < _args.Dropout && _args.Dropout < 1f) + { + inputs_i = inputs * dp_mask[0]; + inputs_f = inputs * dp_mask[1]; + inputs_c = inputs * dp_mask[2]; + inputs_o = inputs * dp_mask[3]; + } + else + { + inputs_i = inputs; + inputs_f = inputs; + inputs_c = inputs; + inputs_o = inputs; + } + var k = tf.split(_kernel.AsTensor(), num_split: 4, axis: 1); + Tensor k_i = k[0], k_f = k[1], k_c = k[2], k_o = k[3]; + var x_i = math_ops.matmul(inputs_i, k_i); + var x_f = math_ops.matmul(inputs_f, k_f); + var x_c = math_ops.matmul(inputs_c, k_c); + var x_o = math_ops.matmul(inputs_o, k_o); + if(_args.UseBias) + { + var b = tf.split(_bias.AsTensor(), num_split: 4, axis: 0); + Tensor b_i = b[0], b_f = b[1], b_c = b[2], b_o = b[3]; + x_i = gen_nn_ops.bias_add(x_i, b_i); + x_f = gen_nn_ops.bias_add(x_f, b_f); + x_c = gen_nn_ops.bias_add(x_c, b_c); + x_o = gen_nn_ops.bias_add(x_o, b_o); + } + + Tensor h_tm1_i; + Tensor h_tm1_f; + Tensor h_tm1_c; + Tensor h_tm1_o; + if (0f < _args.RecurrentDropout && _args.RecurrentDropout < 1f) + { + h_tm1_i = h_tm1 * rec_dp_mask[0]; + h_tm1_f = h_tm1 * rec_dp_mask[1]; + h_tm1_c = h_tm1 * rec_dp_mask[2]; + h_tm1_o = h_tm1 * rec_dp_mask[3]; + } + else + { + h_tm1_i = h_tm1; + h_tm1_f = h_tm1; + h_tm1_c = h_tm1; + h_tm1_o = h_tm1; + } + var x = new Tensor[] { x_i, x_f, x_c, x_o }; + var h_tm1_array = new Tensor[] { h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o }; + (c, o) = _compute_carry_and_output(x, h_tm1_array, c_tm1); + } + else + { + if (0f < _args.Dropout && _args.Dropout < 1f) + inputs = inputs * dp_mask[0]; + var z = math_ops.matmul(inputs, _kernel.AsTensor()); + z += math_ops.matmul(h_tm1, _recurrent_kernel.AsTensor()); + if (_args.UseBias) + { + z = tf.nn.bias_add(z, _bias); + } + var z_array = tf.split(z, num_split: 4, axis: 1); + (c, o) = _compute_carry_and_output_fused(z_array, c_tm1); + } + var h = o * _args.Activation.Apply(c); + // 这里是因为 Tensors 类初始化的时候会把第一个元素之后的元素打包成一个数组 + return new Tensors(h, h, c); + } + + /// + /// Computes carry and output using split kernels. 
+ /// + /// + /// + /// + /// + /// + public Tensors _compute_carry_and_output(Tensor[] x, Tensor[] h_tm1, Tensor c_tm1) + { + Tensor x_i = x[0], x_f = x[1], x_c = x[2], x_o = x[3]; + Tensor h_tm1_i = h_tm1[0], h_tm1_f = h_tm1[1], h_tm1_c = h_tm1[2], + h_tm1_o = h_tm1[3]; + + var _recurrent_kernel_tensor = _recurrent_kernel.AsTensor(); + var startIndex = _recurrent_kernel_tensor.shape[0]; + var endIndex = _recurrent_kernel_tensor.shape[1]; + var _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor, + new[] { 0, 0 }, new[] { startIndex, _args.Units }); + var i = _args.RecurrentActivation.Apply( + x_i + math_ops.matmul(h_tm1_i, _recurrent_kernel_slice)); + _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor, + new[] { 0, _args.Units }, new[] { startIndex, _args.Units * 2}); + var f = _args.RecurrentActivation.Apply( + x_f + math_ops.matmul(h_tm1_f, _recurrent_kernel_slice)); + _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor, + new[] { 0, _args.Units * 2 }, new[] { startIndex, _args.Units * 3 }); + var c = f * c_tm1 + i * _args.Activation.Apply( + x_c + math_ops.matmul(h_tm1_c, _recurrent_kernel_slice)); + _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor, + new[] { 0, _args.Units * 3 }, new[] { startIndex, endIndex }); + var o = _args.RecurrentActivation.Apply( + x_o + math_ops.matmul(h_tm1_o, _recurrent_kernel_slice)); + + return new Tensors(c, o); + } + + /// + /// Computes carry and output using fused kernels. + /// + /// + /// + /// + public Tensors _compute_carry_and_output_fused(Tensor[] z, Tensor c_tm1) + { + Tensor z0 = z[0], z1 = z[1], z2 = z[2], z3 = z[3]; + var i = _args.RecurrentActivation.Apply(z0); + var f = _args.RecurrentActivation.Apply(z1); + var c = f * c_tm1 + i * _args.RecurrentActivation.Apply(z2); + var o = _args.RecurrentActivation.Apply(z3); + return new Tensors(c, o); + } + + public Tensors get_initial_state(Tensors inputs = null, long? batch_size = null, TF_DataType? dtype = null) + { + return RnnUtils.generate_zero_filled_state_for_cell(this, inputs, batch_size.Value, dtype.Value); } } + + } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs index 3b4b9419e..d318dc45f 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs @@ -74,8 +74,8 @@ protected override Tensors Call(Tensors inputs, Tensors states = null, bool? tra { // TODO(Rinne): check if it will have multiple tensors when not nested. Tensors prev_output = Nest.IsNested(states) ? 
new Tensors(states[0]) : states; - var dp_mask = get_dropout_maskcell_for_cell(inputs, training.Value); - var rec_dp_mask = get_recurrent_dropout_maskcell_for_cell(prev_output, training.Value); + var dp_mask = get_dropout_mask_for_cell(inputs, training.Value); + var rec_dp_mask = get_recurrent_dropout_mask_for_cell(prev_output, training.Value); Tensor h; var ranks = inputs.rank; diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs index fcb9ad1d6..54ea1565b 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs @@ -21,21 +21,6 @@ public class Rnn [TestMethod] public void SimpleRNNCell() { - //var cell = tf.keras.layers.SimpleRNNCell(64, dropout: 0.5f, recurrent_dropout: 0.5f); - //var h0 = new Tensors { tf.zeros(new Shape(4, 64)) }; - //var x = tf.random.normal((4, 100)); - //var (y, h1) = cell.Apply(inputs: x, states: h0); - //var h2 = h1; - //Assert.AreEqual((4, 64), y.shape); - //Assert.AreEqual((4, 64), h2[0].shape); - - //var model = keras.Sequential(new List - //{ - // keras.layers.InputLayer(input_shape: (4,100)), - // keras.layers.SimpleRNNCell(64) - //}); - //model.summary(); - var cell = tf.keras.layers.SimpleRNNCell(64, dropout: 0.5f, recurrent_dropout: 0.5f); var h0 = new Tensors { tf.zeros(new Shape(4, 64)) }; var x = tf.random.normal((4, 100)); @@ -59,6 +44,17 @@ public void StackedRNNCell() Assert.AreEqual((32, 4), state[0].shape); } + [TestMethod] + public void LSTMCell() + { + var inputs = tf.ones((2, 100)); + var states = new Tensors { tf.zeros((2, 4)), tf.zeros((2, 4)) }; + var rnn = tf.keras.layers.LSTMCell(4); + var (output, new_states) = rnn.Apply(inputs, states); + Assert.AreEqual((2, 4), output.shape); + Assert.AreEqual((2, 4), new_states[0].shape); + } + [TestMethod] public void SimpleRNN() { @@ -105,15 +101,27 @@ public void RNNForStackedRNNCell() } [TestMethod] - public void WlzTest() + public void RNNForLSTMCell() { - long[] b = { 1, 2, 3 }; - - Shape a = new Shape(Unknown).concatenate(b); - Console.WriteLine(a); - + var inputs = tf.ones((5, 10, 8)); + var rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(4)); + var output = rnn.Apply(inputs); + Console.WriteLine($"output: {output}"); + Assert.AreEqual((5, 4), output.shape); } + [TestMethod] + public void MyTest() + { + var a = tf.zeros((2, 3)); + var b = tf.ones_like(a); + var c = tf.ones((3,4)); + + var d = new Tensors { a, b, c }; + var (A, BC) = d; + Console.WriteLine($"A:{A}"); + Console.WriteLine($"BC:{BC}"); + } } } From 6b30902ee88c7ce608bf7a938eac3dcc1664546b Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Fri, 16 Jun 2023 18:55:23 +0800 Subject: [PATCH 040/182] fix: error after merging LSTM support. 
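Merging the LSTM support left the RNN code with two overlapping ways of describing a cell's state layout. This commit settles on INestStructure<long>: Shape itself now implements the interface, StateSize and OutputSize expose nested structures of unit counts, and the zero-state helpers simply flatten that structure. A minimal sketch of the resulting bookkeeping, using only types touched by this diff (the unit counts are arbitrary examples):

    // An LSTM cell with 4 units carries two state tensors, the hidden state h and the carry state c,
    // while a simple RNN cell carries a single one.
    INestStructure<long> lstmState = new NestList<long>(4, 4);
    INestStructure<long> simpleState = new NestNode<long>(4);

    // RnnUtils.generate_zero_filled_state builds one zero-filled tensor per flattened entry:
    foreach (long units in lstmState.Flatten())
    {
        // array_ops.zeros((batch_size, units), dtype): two tensors for the LSTM cell, one for SimpleRNN
    }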
--- .../Common/Types/GeneralizedTensorShape.cs | 15 ------- .../Common/Types/NestList.cs | 7 ++- .../Keras/Layers/Rnn/IRnnCell.cs | 4 +- src/TensorFlowNET.Core/Numpy/Shape.cs | 24 +++++++++- .../Operations/NnOps/RNNCell.cs | 4 +- .../Layers/Rnn/DropoutRNNCellMixin.cs | 4 +- .../Layers/Rnn/LSTMCell.cs | 21 +++------ src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs | 44 +++++++------------ .../Layers/Rnn/SimpleRNNCell.cs | 12 ++--- .../Layers/Rnn/StackedRNNCells.cs | 20 ++++----- src/TensorFlowNET.Keras/Utils/RnnUtils.cs | 13 +++--- 11 files changed, 79 insertions(+), 89 deletions(-) diff --git a/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs b/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs index 401903159..986136f4d 100644 --- a/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs +++ b/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs @@ -7,21 +7,6 @@ namespace Tensorflow.Common.Types { public class GeneralizedTensorShape: Nest { - ////public TensorShapeConfig[] Shapes { get; set; } - ///// - ///// create a single-dim generalized Tensor shape. - ///// - ///// - //public GeneralizedTensorShape(int dim, int size = 1) - //{ - // var elem = new TensorShapeConfig() { Items = new long?[] { dim } }; - // Shapes = Enumerable.Repeat(elem, size).ToArray(); - // //Shapes = new TensorShapeConfig[size]; - // //Shapes.Initialize(new TensorShapeConfig() { Items = new long?[] { dim } }); - // //Array.Initialize(Shapes, new TensorShapeConfig() { Items = new long?[] { dim } }); - // ////Shapes = new TensorShapeConfig[] { new TensorShapeConfig() { Items = new long?[] { dim } } }; - //} - public GeneralizedTensorShape(Shape value, string? name = null) { NodeValue = value; diff --git a/src/TensorFlowNET.Core/Common/Types/NestList.cs b/src/TensorFlowNET.Core/Common/Types/NestList.cs index e38675da4..1e0d272b7 100644 --- a/src/TensorFlowNET.Core/Common/Types/NestList.cs +++ b/src/TensorFlowNET.Core/Common/Types/NestList.cs @@ -15,7 +15,12 @@ public sealed class NestList : INestStructure, IEnumerable public int ShallowNestedCount => Values.Count; public int TotalNestedCount => Values.Count; - + + public NestList(params T[] values) + { + Values = new List(values); + } + public NestList(IEnumerable values) { Values = new List(values); diff --git a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs index 8614391a6..8d6fbc976 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs @@ -10,11 +10,11 @@ public interface IRnnCell: ILayer /// /// If the derived class tends to not implement it, please return null. /// - GeneralizedTensorShape? StateSize { get; } + INestStructure? StateSize { get; } /// /// If the derived class tends to not implement it, please return null. /// - GeneralizedTensorShape? OutputSize { get; } + INestStructure? OutputSize { get; } /// /// Whether the optional RNN args are supported when appying the layer. /// In other words, whether `Apply` is overwrited with process of `RnnOptionalArgs`. diff --git a/src/TensorFlowNET.Core/Numpy/Shape.cs b/src/TensorFlowNET.Core/Numpy/Shape.cs index c339f12de..cbbf66b44 100644 --- a/src/TensorFlowNET.Core/Numpy/Shape.cs +++ b/src/TensorFlowNET.Core/Numpy/Shape.cs @@ -19,13 +19,14 @@ limitations under the License. 
using System.Collections.Generic; using System.Linq; using System.Text; +using Tensorflow.Common.Types; using Tensorflow.Keras.Saving.Common; using Tensorflow.NumPy; namespace Tensorflow { [JsonConverter(typeof(CustomizedShapeJsonConverter))] - public class Shape + public class Shape : INestStructure { public int ndim => _dims == null ? -1 : _dims.Length; long[] _dims; @@ -41,6 +42,27 @@ public long[] strides } } + public NestType NestType => NestType.List; + + public int ShallowNestedCount => ndim; + /// + /// The total item count of depth 1 of the nested structure. + /// For example, [1, 2, [3, 4, 5]] has TotalNestedCount = 5. + /// + public int TotalNestedCount => ndim; + + public IEnumerable Flatten() => dims.Select(x => x); + + public INestStructure MapStructure(Func func) + { + return new NestList(dims.Select(x => func(x))); + } + + public Nest AsNest() + { + return new NestList(Flatten()).AsNest(); + } + #region https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/proposals/csharp-8.0/ranges public int Length => ndim; public long[] Slice(int start, int length) diff --git a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs index b651089a5..e488c47e7 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs @@ -185,8 +185,8 @@ public Tensors GetInitialState(Tensors inputs = null, Tensor batch_size = null, { throw new NotImplementedException(); } - public GeneralizedTensorShape StateSize => throw new NotImplementedException(); - public GeneralizedTensorShape OutputSize => throw new NotImplementedException(); + public INestStructure StateSize => throw new NotImplementedException(); + public INestStructure OutputSize => throw new NotImplementedException(); public bool IsTFRnnCell => throw new NotImplementedException(); public bool SupportOptionalArgs => throw new NotImplementedException(); } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs index 1cc36d34a..75feb8ea2 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs @@ -18,8 +18,8 @@ public DropoutRNNCellMixin(LayerArgs args): base(args) } - public abstract GeneralizedTensorShape StateSize { get; } - public abstract GeneralizedTensorShape OutputSize { get; } + public abstract INestStructure StateSize { get; } + public abstract INestStructure OutputSize { get; } public abstract bool SupportOptionalArgs { get; } public virtual Tensors GetInitialState(Tensors inputs, Tensor batch_size, TF_DataType dtype) { diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs index 94d98e130..17042767d 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs @@ -22,13 +22,11 @@ public class LSTMCell : DropoutRNNCellMixin IVariableV1 _recurrent_kernel; IInitializer _bias_initializer; IVariableV1 _bias; - GeneralizedTensorShape _state_size; - GeneralizedTensorShape _output_size; - public override GeneralizedTensorShape StateSize => _state_size; + INestStructure _state_size; + INestStructure _output_size; + public override INestStructure StateSize => _state_size; - public override GeneralizedTensorShape OutputSize => _output_size; - - public override bool IsTFRnnCell => true; + public override INestStructure OutputSize => _output_size; public override bool 
SupportOptionalArgs => false; public LSTMCell(LSTMCellArgs args) @@ -49,10 +47,8 @@ public LSTMCell(LSTMCellArgs args) _args.Implementation = 1; } - _state_size = new GeneralizedTensorShape(_args.Units, 2); - _output_size = new GeneralizedTensorShape(_args.Units); - - + _state_size = new NestList(_args.Units, _args.Units); + _output_size = new NestNode(_args.Units); } public override void build(KerasShapesWrapper input_shape) @@ -229,11 +225,6 @@ public Tensors _compute_carry_and_output_fused(Tensor[] z, Tensor c_tm1) var o = _args.RecurrentActivation.Apply(z3); return new Tensors(c, o); } - - public Tensors get_initial_state(Tensors inputs = null, long? batch_size = null, TF_DataType? dtype = null) - { - return RnnUtils.generate_zero_filled_state_for_cell(this, inputs, batch_size.Value, dtype.Value); - } } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs index f99bc23aa..0aeacc25d 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs @@ -86,7 +86,7 @@ public Tensors States set { _states = value; } } - private OneOf> compute_output_shape(Shape input_shape) + private INestStructure compute_output_shape(Shape input_shape) { var batch = input_shape[0]; var time_step = input_shape[1]; @@ -96,13 +96,15 @@ private OneOf> compute_output_shape(Shape input_shape) } // state_size is a array of ints or a positive integer - var state_size = Cell.StateSize.ToSingleShape(); + var state_size = Cell.StateSize; + if(state_size?.TotalNestedCount == 1) + { + state_size = new NestList(state_size.Flatten().First()); + } - // TODO(wanglongzhi2001),flat_output_size应该是什么类型的,Shape还是Tensor - Func _get_output_shape; - _get_output_shape = (flat_output_size) => + Func _get_output_shape = (flat_output_size) => { - var output_dim = flat_output_size.as_int_list(); + var output_dim = new Shape(flat_output_size).as_int_list(); Shape output_shape; if (_args.ReturnSequences) { @@ -125,31 +127,28 @@ private OneOf> compute_output_shape(Shape input_shape) Type type = Cell.GetType(); PropertyInfo output_size_info = type.GetProperty("output_size"); - Shape output_shape; + INestStructure output_shape; if (output_size_info != null) { - output_shape = nest.map_structure(_get_output_shape, Cell.OutputSize.ToSingleShape()); - // TODO(wanglongzhi2001),output_shape应该简单的就是一个元组还是一个Shape类型 - output_shape = (output_shape.Length == 1 ? 
(int)output_shape[0] : output_shape); + output_shape = Nest.MapStructure(_get_output_shape, Cell.OutputSize); } else { - output_shape = _get_output_shape(state_size); + output_shape = new NestNode(_get_output_shape(state_size.Flatten().First())); } if (_args.ReturnState) { - Func _get_state_shape; - _get_state_shape = (flat_state) => + Func _get_state_shape = (flat_state) => { - var state_shape = new int[] { (int)batch }.concat(flat_state.as_int_list()); + var state_shape = new int[] { (int)batch }.concat(new Shape(flat_state).as_int_list()); return new Shape(state_shape); }; - var state_shape = _get_state_shape(state_size); + var state_shape = Nest.MapStructure(_get_state_shape, state_size); - return new List { output_shape, state_shape }; + return new Nest(new[] { output_shape, state_shape } ); } else { @@ -435,7 +434,7 @@ public override Tensors Apply(Tensors inputs, Tensors initial_states = null, boo tmp.add(tf.math.count_nonzero(s.Single())); } var non_zero_count = tf.add_n(tmp); - //initial_state = tf.cond(non_zero_count > 0, () => States, () => initial_state); + initial_state = tf.cond(non_zero_count > 0, States, initial_state); if ((int)non_zero_count.numpy() > 0) { initial_state = States; @@ -445,16 +444,7 @@ public override Tensors Apply(Tensors inputs, Tensors initial_states = null, boo { initial_state = States; } - // TODO(Wanglongzhi2001), -// initial_state = tf.nest.map_structure( -//# When the layer has a inferred dtype, use the dtype from the -//# cell. -// lambda v: tf.cast( -// v, self.compute_dtype or self.cell.compute_dtype -// ), -// initial_state, -// ) - + //initial_state = Nest.MapStructure(v => tf.cast(v, this.), initial_state); } else if (initial_state is null) { diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs index d318dc45f..8fdc598ed 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs @@ -24,11 +24,11 @@ public class SimpleRNNCell : DropoutRNNCellMixin IVariableV1 _kernel; IVariableV1 _recurrent_kernel; IVariableV1 _bias; - GeneralizedTensorShape _state_size; - GeneralizedTensorShape _output_size; + INestStructure _state_size; + INestStructure _output_size; - public override GeneralizedTensorShape StateSize => _state_size; - public override GeneralizedTensorShape OutputSize => _output_size; + public override INestStructure StateSize => _state_size; + public override INestStructure OutputSize => _output_size; public override bool SupportOptionalArgs => false; public SimpleRNNCell(SimpleRNNCellArgs args) : base(args) @@ -41,8 +41,8 @@ public SimpleRNNCell(SimpleRNNCellArgs args) : base(args) } this._args.Dropout = Math.Min(1f, Math.Max(0f, this._args.Dropout)); this._args.RecurrentDropout = Math.Min(1f, Math.Max(0f, this._args.RecurrentDropout)); - _state_size = new GeneralizedTensorShape(args.Units); - _output_size = new GeneralizedTensorShape(args.Units); + _state_size = new NestNode(args.Units); + _output_size = new NestNode(args.Units); } public override void build(KerasShapesWrapper input_shape) diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs index fb74d6d29..3e7b227c2 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs @@ -1,10 +1,8 @@ using System; -using System.Collections.Generic; using System.ComponentModel; using System.Linq; using Tensorflow.Common.Extensions; 
using Tensorflow.Common.Types; -using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; @@ -38,24 +36,24 @@ public StackedRNNCells(StackedRNNCellsArgs args) : base(args) public bool SupportOptionalArgs => false; - public GeneralizedTensorShape StateSize + public INestStructure StateSize { get { if (_reverse_state_order) { var state_sizes = Cells.Reverse().Select(cell => cell.StateSize); - return new GeneralizedTensorShape(new Nest(state_sizes.Select(s => new Nest(s)))); + return new Nest(state_sizes); } else { var state_sizes = Cells.Select(cell => cell.StateSize); - return new GeneralizedTensorShape(new Nest(state_sizes.Select(s => new Nest(s)))); + return new Nest(state_sizes); } } } - public GeneralizedTensorShape OutputSize + public INestStructure OutputSize { get { @@ -66,7 +64,7 @@ public GeneralizedTensorShape OutputSize } else if (RnnUtils.is_multiple_state(lastCell.StateSize)) { - return lastCell.StateSize.First(); + return new NestNode(lastCell.StateSize.Flatten().First()); } else { @@ -89,7 +87,7 @@ public Tensors GetInitialState(Tensors inputs = null, Tensor batch_size = null, protected override Tensors Call(Tensors inputs, Tensors states = null, bool? training = null, IOptionalArgs? optional_args = null) { // Recover per-cell states. - var state_size = _reverse_state_order ? new GeneralizedTensorShape(StateSize.Reverse()) : StateSize; + var state_size = _reverse_state_order ? new NestList(StateSize.Flatten().Reverse()) : StateSize; var nested_states = Nest.PackSequenceAs(state_size, Nest.Flatten(states).ToArray()); var new_nest_states = Nest.Empty; @@ -118,20 +116,20 @@ public override void build(KerasShapesWrapper input_shape) layer.build(shape); layer.Built = true; } - GeneralizedTensorShape output_dim; + INestStructure output_dim; if(cell.OutputSize is not null) { output_dim = cell.OutputSize; } else if (RnnUtils.is_multiple_state(cell.StateSize)) { - output_dim = cell.StateSize.First(); + output_dim = new NestNode(cell.StateSize.Flatten().First()); } else { output_dim = cell.StateSize; } - shape = new Shape(new long[] { shape.dims[0] }.Concat(output_dim.ToSingleShape().dims).ToArray()); + shape = new Shape(new long[] { shape.dims[0] }.Concat(output_dim.Flatten()).ToArray()); } this.Built = true; } diff --git a/src/TensorFlowNET.Keras/Utils/RnnUtils.cs b/src/TensorFlowNET.Keras/Utils/RnnUtils.cs index 7ff3f9fb8..e8700c1f2 100644 --- a/src/TensorFlowNET.Keras/Utils/RnnUtils.cs +++ b/src/TensorFlowNET.Keras/Utils/RnnUtils.cs @@ -10,12 +10,11 @@ namespace Tensorflow.Keras.Utils { internal static class RnnUtils { - internal static Tensors generate_zero_filled_state(Tensor batch_size_tensor, GeneralizedTensorShape state_size, TF_DataType dtype) + internal static Tensors generate_zero_filled_state(Tensor batch_size_tensor, INestStructure state_size, TF_DataType dtype) { - Func create_zeros; - create_zeros = (GeneralizedTensorShape unnested_state_size) => + Func create_zeros = (unnested_state_size) => { - var flat_dims = unnested_state_size.ToSingleShape().dims; + var flat_dims = new Shape(unnested_state_size).dims; var init_state_size = new Tensor[] { batch_size_tensor }. Concat(flat_dims.Select(x => tf.constant(x, dtypes.int32))).ToArray(); return array_ops.zeros(init_state_size, dtype: dtype); @@ -24,11 +23,11 @@ internal static Tensors generate_zero_filled_state(Tensor batch_size_tensor, Gen // TODO(Rinne): map structure with nested tensors. 
if(state_size.TotalNestedCount > 1) { - return new Tensors(state_size.Flatten().Select(s => create_zeros(new GeneralizedTensorShape(s))).ToArray()); + return new Tensors(state_size.Flatten().Select(s => create_zeros(s)).ToArray()); } else { - return create_zeros(state_size); + return create_zeros(state_size.Flatten().First()); } } @@ -96,7 +95,7 @@ internal static (Tensors, Tensors, Tensors) standardize_args(Tensors inputs, Ten /// /// /// - public static bool is_multiple_state(GeneralizedTensorShape state_size) + public static bool is_multiple_state(INestStructure state_size) { return state_size.TotalNestedCount > 1; } From 0114885ed775a2ef9847b64c582039b8324c10d6 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Fri, 16 Jun 2023 19:06:58 +0800 Subject: [PATCH 041/182] feat: update some gen_ops. --- .../Operations/gen_array_ops.cs | 499 +++++++++- .../Operations/gen_functional_ops.cs | 57 ++ .../Operations/gen_io_ops.cs | 936 ++++++++++++++++-- .../Operations/gen_list_ops.cs | 81 ++ .../Operations/gen_math_ops.cs | 588 ++++++++++- .../Operations/gen_nn_ops.cs | 409 ++++++++ tools/Tensorflow.CodeGen/GenOpsWriter.cs | 1 + 7 files changed, 2450 insertions(+), 121 deletions(-) diff --git a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs index 9810d32f3..8367c2f94 100644 --- a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs @@ -2,6 +2,7 @@ using Tensorflow.Eager; using Tensorflow.Contexts; +using Tensorflow.Exceptions; using static Tensorflow.Binding; namespace Tensorflow; @@ -25,6 +26,10 @@ public static Tensor batch_matrix_band_part(Tensor input, Tensor num_lower, Tens var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixBandPart", name) { args = new object[] { input, num_lower, num_upper }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -76,6 +81,10 @@ public static Tensor batch_matrix_diag(Tensor diagonal, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixDiag", name) { args = new object[] { diagonal }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -125,6 +134,10 @@ public static Tensor batch_matrix_diag_part(Tensor input, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixDiagPart", name) { args = new object[] { input }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -175,6 +188,10 @@ public static Tensor batch_matrix_set_diag(Tensor input, Tensor diagonal, string var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixSetDiag", name) { args = new object[] { input, diagonal }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -238,6 +255,10 @@ public static Tensor batch_to_space(Tensor input, Tensor crops, int block_size = var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchToSpace", name) { args = new object[] { input, crops }, attrs = new Dictionary() { ["block_size"] = block_size } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -301,6 +322,10 @@ public static Tensor batch_to_space_nd(Tensor input, Tensor block_shape, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchToSpaceND", name) { args = new object[] { input, block_shape, crops }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -407,6 +432,10 @@ public static Tensor bitcast(Tensor input, TF_DataType type, string? name = null var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bitcast", name) { args = new object[] { input }, attrs = new Dictionary() { ["type"] = type } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -464,6 +493,10 @@ public static Tensor broadcast_args(Tensor s0, Tensor s1, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastArgs", name) { args = new object[] { s0, s1 }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -520,6 +553,10 @@ public static Tensor[] broadcast_gradient_args(Tensor s0, Tensor s1, string? nam var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastGradientArgs", name) { args = new object[] { s0, s1 }, attrs = new Dictionary() { } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -607,6 +644,10 @@ public static Tensor broadcast_to(Tensor input, Tensor shape, string? name = nul var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastTo", name) { args = new object[] { input, shape }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -689,6 +730,10 @@ public static Tensor check_numerics(Tensor tensor, string message, string? 
name var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CheckNumerics", name) { args = new object[] { tensor }, attrs = new Dictionary() { ["message"] = message } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -752,6 +797,10 @@ public static Tensor check_numerics_v2(Tensor tensor, string message, string? na var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CheckNumericsV2", name) { args = new object[] { tensor }, attrs = new Dictionary() { ["message"] = message } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -803,6 +852,10 @@ public static Tensor concat(Tensor concat_dim, Tensors values, string? name = nu var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Concat", name) { args = new object[] { concat_dim, values }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -871,6 +924,10 @@ public static Tensor[] concat_offset(Tensor concat_dim, Tensors shape, string? n var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConcatOffset", name) { args = new object[] { concat_dim, shape }, attrs = new Dictionary() { } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -925,6 +982,10 @@ public static Tensor concat_v2(Tensors values, Tensor axis, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConcatV2", name) { args = new object[] { values, axis }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -986,6 +1047,10 @@ public static Tensor conjugate_transpose(Tensor x, Tensor perm, string? name = n var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConjugateTranspose", name) { args = new object[] { x, perm }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1041,6 +1106,10 @@ public static Tensor _const(TensorProto value, TF_DataType dtype, string? name = var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Const", name) { args = new object[] { }, attrs = new Dictionary() { ["value"] = value, ["dtype"] = dtype } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1098,6 +1167,10 @@ public static Tensor debug_gradient_identity(Tensor input, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DebugGradientIdentity", name) { args = new object[] { input }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1182,6 +1255,10 @@ public static Tensor deep_copy(Tensor x, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DeepCopy", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1330,6 +1407,10 @@ public static Tensor depth_to_space(Tensor input, int block_size = 0, string dat var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthToSpace", name) { args = new object[] { input }, attrs = new Dictionary() { ["block_size"] = block_size, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1452,6 +1533,10 @@ public static Tensor dequantize(Tensor input, Tensor min_range, Tensor max_range var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dequantize", name) { args = new object[] { input, min_range, max_range }, attrs = new Dictionary() { ["mode"] = mode, ["narrow_range"] = narrow_range, ["axis"] = axis, ["dtype"] = dtype } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1532,6 +1617,10 @@ public static Tensor diag(Tensor diagonal, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Diag", name) { args = new object[] { diagonal }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1603,6 +1692,10 @@ public static Tensor diag_part(Tensor input, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DiagPart", name) { args = new object[] { input }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1674,6 +1767,10 @@ public static Tensor edit_distance(Tensor hypothesis_indices, Tensor hypothesis_ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EditDistance", name) { args = new object[] { hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape }, attrs = new Dictionary() { ["normalize"] = normalize } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1731,6 +1828,10 @@ public static Tensor empty(Tensor shape, TF_DataType dtype, bool init = false, s var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Empty", name) { args = new object[] { shape }, attrs = new Dictionary() { ["dtype"] = dtype, ["init"] = init } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1793,6 +1894,10 @@ public static Tensor ensure_shape(Tensor input, Shape shape, string? name = null var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EnsureShape", name) { args = new object[] { input }, attrs = new Dictionary() { ["shape"] = shape } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1878,6 +1983,10 @@ public static Tensor expand_dims(Tensor input, Tensor dim, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExpandDims", name) { args = new object[] { input, dim }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1954,6 +2063,10 @@ public static Tensor extract_image_patches(Tensor images, int[] ksizes, int[] st var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExtractImagePatches", name) { args = new object[] { images }, attrs = new Dictionary() { ["ksizes"] = ksizes, ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2030,6 +2143,10 @@ public static Tensor extract_volume_patches(Tensor input, int[] ksizes, int[] st var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExtractVolumePatches", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksizes"] = ksizes, ["strides"] = strides, ["padding"] = padding } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2110,6 +2227,10 @@ public static Tensor fake_quant_with_min_max_args(Tensor inputs, float min = -6f var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxArgs", name) { args = new object[] { inputs }, attrs = new Dictionary() { ["min"] = min, ["max"] = max, ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2168,6 +2289,10 @@ public static Tensor fake_quant_with_min_max_args_gradient(Tensor gradients, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxArgsGradient", name) { args = new object[] { gradients, inputs }, attrs = new Dictionary() { ["min"] = min, ["max"] = max, ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2254,6 +2379,10 @@ public static Tensor fake_quant_with_min_max_vars(Tensor inputs, Tensor min, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVars", name) { args = new object[] { inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2320,6 +2449,10 @@ public static Tensor[] fake_quant_with_min_max_vars_gradient(Tensor gradients, T var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsGradient", name) { args = new object[] { gradients, inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2407,6 +2540,10 @@ public static Tensor fake_quant_with_min_max_vars_per_channel(Tensor inputs, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsPerChannel", name) { args = new object[] { inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); return _fast_path_result[0]; } + catch 
(NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2473,6 +2610,10 @@ public static Tensor[] fake_quant_with_min_max_vars_per_channel_gradient(Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsPerChannelGradient", name) { args = new object[] { gradients, inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2551,6 +2692,10 @@ public static Tensor fill(Tensor dims, Tensor value, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Fill", name) { args = new object[] { dims, value }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2636,6 +2781,10 @@ public static Tensor fingerprint(Tensor data, Tensor method, string? name = null var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Fingerprint", name) { args = new object[] { data, method }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2717,6 +2866,10 @@ public static Tensor gather(Tensor params_, Tensor indices, bool validate_indice var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Gather", name) { args = new object[] { params_, indices }, attrs = new Dictionary() { ["validate_indices"] = validate_indices } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2877,6 +3030,10 @@ public static Tensor gather_nd(Tensor params_, Tensor indices, string? name = nu var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GatherNd", name) { args = new object[] { params_, indices }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2961,6 +3118,10 @@ public static Tensor gather_v2(Tensor params_, Tensor indices, Tensor axis, int var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GatherV2", name) { args = new object[] { params_, indices, axis }, attrs = new Dictionary() { ["batch_dims"] = batch_dims } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3023,6 +3184,10 @@ public static Tensor guarantee_const(Tensor input, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GuaranteeConst", name) { args = new object[] { input }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3072,6 +3237,10 @@ public static Tensor identity(Tensor input, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Identity", name) { args = new object[] { input }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3129,24 +3298,27 @@ public static Tensor identity_eager_fallback(Tensor input, string name, Context /// /// /// - /// /// - public static Tensor identity_n(Tensor input, TF_DataType[] T, string? 
name = null) + public static Tensor[] identity_n(Tensors input, string? name = null) { var _ctx = tf.Context; if (_ctx.executing_eagerly()) { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityN", name) { args = new object[] { input }, attrs = new Dictionary() { ["T"] = T } }); - return _fast_path_result[0]; + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityN", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (NotOkStatusException ex) + { + throw ex; } catch (Exception) { } try { - return identity_n_eager_fallback(input, T: T, name: name, ctx: _ctx); + return identity_n_eager_fallback(input, name: name, ctx: _ctx); } catch (Exception) { @@ -3154,7 +3326,6 @@ public static Tensor identity_n(Tensor input, TF_DataType[] T, string? name = nu } Dictionary keywords = new(); keywords["input"] = input; - keywords["T"] = T; var _op = tf.OpDefLib._apply_op_helper("IdentityN", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) @@ -3162,19 +3333,19 @@ public static Tensor identity_n(Tensor input, TF_DataType[] T, string? name = nu object[] _attrs = new object[] { "T", _op.get_attr("T") }; _execute.record_gradient("IdentityN", _op.inputs, _attrs, _result); } - return _result[0]; + return _result; } - public static Tensor identity_n_eager_fallback(Tensor input, TF_DataType[] T, string name, Context ctx) + public static Tensor[] identity_n_eager_fallback(Tensor input, string name, Context ctx) { Tensor[] _inputs_flat = new Tensor[] { input }; - object[] _attrs = new object[] { "T", T }; + object[] _attrs = new object[] { }; var _result = _execute.execute("IdentityN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); if (_execute.must_record_gradient()) { _execute.record_gradient("IdentityN", _inputs_flat, _attrs, _result); } - return _result[0]; + return _result; } /// /// Returns immutable tensor from memory region. @@ -3211,6 +3382,10 @@ public static Tensor immutable_const(TF_DataType dtype, Shape shape, string memo var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ImmutableConst", name) { args = new object[] { }, attrs = new Dictionary() { ["dtype"] = dtype, ["shape"] = shape, ["memory_region_name"] = memory_region_name } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3264,6 +3439,10 @@ public static Tensor inplace_add(Tensor x, Tensor i, Tensor v, string? name = nu var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceAdd", name) { args = new object[] { x, i, v }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3317,6 +3496,10 @@ public static Tensor inplace_sub(Tensor x, Tensor i, Tensor v, string? name = nu var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceSub", name) { args = new object[] { x, i, v }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3370,6 +3553,10 @@ public static Tensor inplace_update(Tensor x, Tensor i, Tensor v, string? 
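The same hunk also reworks the IdentityN binding: instead of a single Tensor plus an explicit TF_DataType[] T attribute and a single-Tensor return, identity_n now takes a Tensors collection, lets the op-def helper infer T from the inputs, and returns every output. A hedged caller-side sketch follows; only identity_n's new signature comes from the patch, while the gen_array_ops qualifier, the Tensors(params Tensor[]) constructor and the tf.constant calls are assumptions about the surrounding API:

// Caller-side sketch of the revised IdentityN binding (illustrative only).
var a = tf.constant(new[] { 1, 2, 3 });
var b = tf.constant(new[,] { { 1.0f, 2.0f } });

// Old: identity_n(Tensor input, TF_DataType[] T) -> Tensor (one input, explicit dtype attr).
// New: all inputs in one call, one identity output per input, T inferred.
Tensor[] outs = gen_array_ops.identity_n(new Tensors(a, b));
Console.WriteLine(outs.Length);   // expected: 2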
name = var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceUpdate", name) { args = new object[] { x, i, v }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3440,6 +3627,10 @@ public static Tensor invert_permutation(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InvertPermutation", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3516,6 +3707,10 @@ public static Tensor[] list_diff(Tensor x, Tensor y, TF_DataType out_idx = TF_Da var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ListDiff", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3590,6 +3785,10 @@ public static Tensor lower_bound(Tensor sorted_inputs, Tensor values, TF_DataTyp var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LowerBound", name) { args = new object[] { sorted_inputs, values }, attrs = new Dictionary() { ["out_type"] = out_type } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3684,6 +3883,10 @@ public static Tensor matrix_band_part(Tensor input, Tensor num_lower, Tensor num var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixBandPart", name) { args = new object[] { input, num_lower, num_upper }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3765,6 +3968,10 @@ public static Tensor matrix_diag(Tensor diagonal, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiag", name) { args = new object[] { diagonal }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3846,6 +4053,10 @@ public static Tensor matrix_diag_part(Tensor input, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPart", name) { args = new object[] { input }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3969,6 +4180,10 @@ public static Tensor matrix_diag_part_v2(Tensor input, Tensor k, Tensor padding_ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPartV2", name) { args = new object[] { input, k, padding_value }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4136,6 +4351,10 @@ public static Tensor matrix_diag_part_v3(Tensor input, Tensor k, Tensor padding_ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPartV3", name) { args = new object[] { input, k, padding_value }, attrs = new Dictionary() { ["align"] = align } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4287,6 +4506,10 @@ public static Tensor matrix_diag_v2(Tensor diagonal, Tensor k, Tensor num_rows, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagV2", name) { args = new object[] { diagonal, k, num_rows, num_cols, padding_value }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4475,6 +4698,10 @@ public static Tensor matrix_diag_v3(Tensor diagonal, Tensor k, Tensor num_rows, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagV3", name) { args = new object[] { diagonal, k, num_rows, num_cols, padding_value }, attrs = new Dictionary() { ["align"] = align } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4550,6 +4777,10 @@ public static Tensor matrix_set_diag(Tensor input, Tensor diagonal, string? 
name var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiag", name) { args = new object[] { input, diagonal }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4677,6 +4908,10 @@ public static Tensor matrix_set_diag_v2(Tensor input, Tensor diagonal, Tensor k, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiagV2", name) { args = new object[] { input, diagonal, k }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4849,6 +5084,10 @@ public static Tensor matrix_set_diag_v3(Tensor input, Tensor diagonal, Tensor k, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiagV3", name) { args = new object[] { input, diagonal, k }, attrs = new Dictionary() { ["align"] = align } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4944,6 +5183,10 @@ public static Tensor mirror_pad(Tensor input, Tensor paddings, string mode, stri var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MirrorPad", name) { args = new object[] { input, paddings }, attrs = new Dictionary() { ["mode"] = mode } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5023,6 +5266,10 @@ public static Tensor mirror_pad_grad(Tensor input, Tensor paddings, string mode, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MirrorPadGrad", name) { args = new object[] { input, paddings }, attrs = new Dictionary() { ["mode"] = mode } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5173,6 +5420,10 @@ public static Tensor one_hot(Tensor indices, Tensor depth, Tensor on_value, Tens var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "OneHot", name) { args = new object[] { indices, depth, on_value, off_value }, attrs = new Dictionary() { ["axis"] = axis } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5226,6 +5477,10 @@ public static Tensor ones_like(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "OnesLike", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5304,6 +5559,10 @@ public static Tensor pack(Tensors values, int axis = 0, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pack", name) { args = new object[] { values }, attrs = new Dictionary() { ["axis"] = axis } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5384,6 +5643,10 @@ public static Tensor pad(Tensor input, Tensor paddings, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pad", name) { args = new object[] { input, paddings }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5464,6 +5727,10 @@ public static Tensor pad_v2(Tensor input, Tensor paddings, Tensor constant_value var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PadV2", name) { args = new object[] { input, paddings, constant_values }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5541,6 +5808,10 @@ public static Tensor parallel_concat(Tensors values, Shape shape, string? name = var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ParallelConcat", name) { args = new object[] { values }, attrs = new Dictionary() { ["shape"] = shape } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5610,6 +5881,10 @@ public static Tensor placeholder(TF_DataType dtype, Shape shape = null, string? var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Placeholder", name) { args = new object[] { }, attrs = new Dictionary() { ["dtype"] = dtype, ["shape"] = shape } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5677,6 +5952,10 @@ public static Tensor placeholder_v2(TF_DataType dtype, Shape shape, string? name var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PlaceholderV2", name) { args = new object[] { }, attrs = new Dictionary() { ["dtype"] = dtype, ["shape"] = shape } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5732,6 +6011,10 @@ public static Tensor placeholder_with_default(Tensor input, Shape shape, string? var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PlaceholderWithDefault", name) { args = new object[] { input }, attrs = new Dictionary() { ["shape"] = shape } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5799,6 +6082,10 @@ public static Tensor prevent_gradient(Tensor input, string message = "", string? 
var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PreventGradient", name) { args = new object[] { input }, attrs = new Dictionary() { ["message"] = message } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5858,6 +6145,10 @@ public static Tensor quantize_and_dequantize(Tensor input, bool signed_input = t var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantize", name) { args = new object[] { input }, attrs = new Dictionary() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["input_min"] = input_min, ["input_max"] = input_max } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6011,6 +6302,10 @@ public static Tensor quantize_and_dequantize_v2(Tensor input, Tensor input_min, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV2", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6085,6 +6380,10 @@ public static Tensor quantize_and_dequantize_v3(Tensor input, Tensor input_min, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV3", name) { args = new object[] { input, input_min, input_max, num_bits }, attrs = new Dictionary() { ["signed_input"] = signed_input, ["range_given"] = range_given, ["narrow_range"] = narrow_range, ["axis"] = axis } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6190,6 +6489,10 @@ public static Tensor quantize_and_dequantize_v4(Tensor input, Tensor input_min, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV4", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6387,6 +6690,10 @@ public static Tensor[] quantize_v2(Tensor input, Tensor min_range, Tensor max_ra var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeV2", name) { args = new object[] { input, min_range, max_range }, attrs = new Dictionary() { ["T"] = T, ["mode"] = mode, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis, ["ensure_minimum_range"] = ensure_minimum_range } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6455,6 +6762,10 @@ public static Tensor[] quantized_concat(Tensor concat_dim, Tensors values, Tenso var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConcat", name) { args = new object[] { concat_dim, values, input_mins, input_maxes }, attrs = new Dictionary() { } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6541,6 +6852,10 @@ public static Tensor[] 
quantized_instance_norm(Tensor x, Tensor x_min, Tensor x_ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedInstanceNorm", name) { args = new object[] { x, x_min, x_max }, attrs = new Dictionary() { ["output_range_given"] = output_range_given, ["given_y_min"] = given_y_min, ["given_y_max"] = given_y_max, ["variance_epsilon"] = variance_epsilon, ["min_separation"] = min_separation } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6605,6 +6920,10 @@ public static Tensor[] quantized_reshape(Tensor tensor, Tensor shape, Tensor inp var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedReshape", name) { args = new object[] { tensor, shape, input_min, input_max }, attrs = new Dictionary() { } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6674,6 +6993,10 @@ public static Tensor rank(Tensor input, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rank", name) { args = new object[] { input }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6815,6 +7138,10 @@ public static Tensor reshape(Tensor tensor, Tensor shape, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reshape", name) { args = new object[] { tensor, shape }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6884,6 +7211,10 @@ public static Operation resource_strided_slice_assign(Tensor ref_, Tensor begin, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceStridedSliceAssign", name) { args = new object[] { ref_, begin, end, strides, value }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } }); return null; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6991,6 +7322,10 @@ public static Tensor reverse(Tensor tensor, Tensor dims, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reverse", name) { args = new object[] { tensor, dims }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7110,6 +7445,10 @@ public static Tensor reverse_sequence(Tensor input, Tensor seq_lengths, int seq_ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReverseSequence", name) { args = new object[] { input, seq_lengths }, attrs = new Dictionary() { ["seq_dim"] = seq_dim, ["batch_dim"] = batch_dim } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7210,6 +7549,10 @@ public static Tensor reverse_v2(Tensor tensor, Tensor axis, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReverseV2", name) { args = new object[] { tensor, axis }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7352,6 +7695,10 @@ public static Tensor scatter_nd(Tensor indices, Tensor updates, Tensor shape, st var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ScatterNd", name) { args = new object[] { indices, updates, shape }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7442,6 +7789,10 @@ public static Tensor scatter_nd_non_aliasing_add(Tensor input, Tensor indices, T var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ScatterNdNonAliasingAdd", name) { args = new object[] { input, indices, updates }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7506,6 +7857,10 @@ public static Tensor shape(Tensor input, TF_DataType out_type = TF_DataType.TF_I var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Shape", name) { args = new object[] { input }, attrs = new Dictionary() { ["out_type"] = out_type } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7562,6 +7917,10 @@ public static Tensor[] shape_n(Tensors input, TF_DataType out_type = TF_DataType var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShapeN", name) { args = new object[] { input }, attrs = new Dictionary() { ["out_type"] = out_type } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7628,6 +7987,10 @@ public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_IN var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Size", name) { args = new object[] { input }, attrs = new Dictionary() { ["out_type"] = out_type } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7690,6 +8053,10 @@ public static Tensor slice(Tensor input, Tensor begin, Tensor size, string? name var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Slice", name) { args = new object[] { input, begin, size }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7741,6 +8108,10 @@ public static Tensor snapshot(Tensor input, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Snapshot", name) { args = new object[] { input }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7879,6 +8250,10 @@ public static Tensor space_to_batch(Tensor input, Tensor paddings, int block_siz var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SpaceToBatch", name) { args = new object[] { input, paddings }, attrs = new Dictionary() { ["block_size"] = block_size } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8048,6 +8423,10 @@ public static Tensor space_to_batch_nd(Tensor input, Tensor block_shape, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SpaceToBatchND", name) { args = new object[] { input, block_shape, paddings }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8192,6 +8571,10 @@ public static Tensor space_to_depth(Tensor input, int block_size = 0, string dat var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SpaceToDepth", name) { args = new object[] { input }, attrs = new Dictionary() { ["block_size"] = block_size, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8254,6 +8637,10 @@ public static Tensor[] split(Tensor split_dim, Tensor value, int num_split = 0, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Split", name) { args = new object[] { split_dim, value }, attrs = new Dictionary() { ["num_split"] = num_split } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8308,6 +8695,10 @@ public static Tensor[] split_v(Tensor value, Tensor size_splits, Tensor split_di var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SplitV", name) { args = new object[] { value, size_splits, split_dim }, attrs = new Dictionary() { ["num_split"] = num_split } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8393,6 +8784,10 @@ public static Tensor squeeze(Tensor input, int[] squeeze_dims = null, string? na var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Squeeze", name) { args = new object[] { input }, attrs = new Dictionary() { ["squeeze_dims"] = squeeze_dims } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8504,6 +8899,10 @@ public static Tensor stop_gradient(Tensor input, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StopGradient", name) { args = new object[] { input }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8689,6 +9088,10 @@ public static Tensor strided_slice(Tensor input, Tensor begin, Tensor end, Tenso var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StridedSlice", name) { args = new object[] { input, begin, end, strides }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8823,6 +9226,10 @@ public static Tensor strided_slice_grad(Tensor shape, Tensor begin, Tensor end, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StridedSliceGrad", name) { args = new object[] { shape, begin, end, strides, dy }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8946,6 +9353,10 @@ public static Tensor tensor_scatter_add(Tensor tensor, Tensor indices, Tensor up var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterAdd", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9013,6 +9424,10 @@ public static Tensor tensor_scatter_max(Tensor tensor, Tensor indices, Tensor up var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterMax", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9066,6 +9481,10 @@ public static Tensor tensor_scatter_min(Tensor tensor, Tensor indices, Tensor up var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterMin", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9185,6 +9604,10 @@ public static Tensor tensor_scatter_sub(Tensor tensor, Tensor indices, Tensor up var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterSub", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9278,6 +9701,10 @@ public static Tensor tensor_scatter_update(Tensor tensor, Tensor indices, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterUpdate", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9348,6 +9775,10 @@ public static Tensor tensor_strided_slice_update(Tensor input, Tensor 
begin, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorStridedSliceUpdate", name) { args = new object[] { input, begin, end, strides, value }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9437,6 +9868,10 @@ public static Tensor tile(Tensor input, Tensor multiples, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Tile", name) { args = new object[] { input, multiples }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9495,6 +9930,10 @@ public static Tensor tile_grad(Tensor input, Tensor multiples, string? name = nu var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TileGrad", name) { args = new object[] { input, multiples }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9552,6 +9991,10 @@ public static Tensor transpose(Tensor x, Tensor perm, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Transpose", name) { args = new object[] { x, perm }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9629,6 +10072,10 @@ public static Tensor[] unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Unique", name) { args = new object[] { x }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9728,6 +10175,10 @@ public static Tensor[] unique_v2(Tensor x, Tensor axis, TF_DataType out_idx = TF var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UniqueV2", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9801,6 +10252,10 @@ public static Tensor[] unique_with_counts(Tensor x, TF_DataType out_idx = TF_Dat var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UniqueWithCounts", name) { args = new object[] { x }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9904,6 +10359,10 @@ public static Tensor[] unique_with_counts_v2(Tensor x, Tensor axis, TF_DataType var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UniqueWithCountsV2", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9978,6 +10437,10 @@ public static Tensor[] unpack(Tensor value, int num = 0, int axis = 0, string? 
n var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Unpack", name) { args = new object[] { value }, attrs = new Dictionary() { ["num"] = num, ["axis"] = axis } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -10054,6 +10517,10 @@ public static Tensor unravel_index(Tensor indices, Tensor dims, string? name = n var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnravelIndex", name) { args = new object[] { indices, dims }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -10127,6 +10594,10 @@ public static Tensor upper_bound(Tensor sorted_inputs, Tensor values, TF_DataTyp var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UpperBound", name) { args = new object[] { sorted_inputs, values }, attrs = new Dictionary() { ["out_type"] = out_type } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -10241,6 +10712,10 @@ public static Tensor where(Tensor input, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Where", name) { args = new object[] { input }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -10290,6 +10765,10 @@ public static Tensor zeros_like(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ZerosLike", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } diff --git a/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs b/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs index e1cf1c138..6ec426f58 100644 --- a/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs @@ -2,6 +2,7 @@ using Tensorflow.Eager; using Tensorflow.Contexts; +using Tensorflow.Exceptions; using static Tensorflow.Binding; namespace Tensorflow; @@ -54,6 +55,10 @@ public static Tensor[] _case(Tensor branch_index, Tensors input, TF_DataType[] T var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Case", name) { args = new object[] { branch_index, input }, attrs = new Dictionary() { ["Tout"] = Tout, ["branches"] = branches, ["output_shapes"] = output_shapes } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -115,6 +120,10 @@ public static Tensor device_index(string[] device_names, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DeviceIndex", name) { args = new object[] { }, attrs = new Dictionary() { ["device_names"] = device_names } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -172,6 +181,10 @@ public static Tensor fake_param(TF_DataType dtype, Shape shape, string? 
name = n var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeParam", name) { args = new object[] { }, attrs = new Dictionary() { ["dtype"] = dtype, ["shape"] = shape } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -240,6 +253,10 @@ public static Tensor[] _for(Tensor start, Tensor limit, Tensor delta, Tensors in var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "For", name) { args = new object[] { start, limit, delta, input }, attrs = new Dictionary() { ["body"] = body } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -310,6 +327,10 @@ public static Tensor[] _if(Tensor cond, Tensors input, TF_DataType[] Tout, objec var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "If", name) { args = new object[] { cond, input }, attrs = new Dictionary() { ["Tout"] = Tout, ["then_branch"] = then_branch, ["else_branch"] = else_branch, ["output_shapes"] = output_shapes } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -385,6 +406,10 @@ public static Tensor[] partitioned_call(Tensors args, TF_DataType[] Tout, object var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PartitionedCall", name) { args = new object[] { args }, attrs = new Dictionary() { ["Tout"] = Tout, ["f"] = f, ["config"] = config, ["config_proto"] = config_proto, ["executor_type"] = executor_type } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -462,6 +487,10 @@ public static Tensor[] remote_call(Tensor target, Tensors args, TF_DataType[] To var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RemoteCall", name) { args = new object[] { target, args }, attrs = new Dictionary() { ["Tout"] = Tout, ["f"] = f } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -529,6 +558,10 @@ public static Tensor[] stateful_partitioned_call(Tensors args, TF_DataType[] Tou var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StatefulPartitionedCall", name) { args = new object[] { args }, attrs = new Dictionary() { ["Tout"] = Tout, ["f"] = f, ["config"] = config, ["config_proto"] = config_proto, ["executor_type"] = executor_type } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -628,6 +661,10 @@ public static Tensor[] stateless_case(Tensor branch_index, Tensors input, TF_Dat var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StatelessCase", name) { args = new object[] { branch_index, input }, attrs = new Dictionary() { ["Tout"] = Tout, ["branches"] = branches, ["output_shapes"] = output_shapes } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -698,6 +735,10 @@ public static Tensor[] stateless_if(Tensor cond, Tensors input, TF_DataType[] To var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StatelessIf", name) { args = new object[] { cond, input }, attrs = new Dictionary() { ["Tout"] = Tout, ["then_branch"] = then_branch, ["else_branch"] = else_branch, ["output_shapes"] = output_shapes } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } 
catch (Exception) { } @@ -775,6 +816,10 @@ public static Tensor[] stateless_while(Tensors input, object cond, object body, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StatelessWhile", name) { args = new object[] { input }, attrs = new Dictionary() { ["cond"] = cond, ["body"] = body, ["output_shapes"] = output_shapes, ["parallel_iterations"] = parallel_iterations } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -855,6 +900,10 @@ public static Tensor[] symbolic_gradient(Tensors input, TF_DataType[] Tout, obje var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SymbolicGradient", name) { args = new object[] { input }, attrs = new Dictionary() { ["Tout"] = Tout, ["f"] = f } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -922,6 +971,10 @@ public static Tensor to_bool(Tensor input, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ToBool", name) { args = new object[] { input }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -991,6 +1044,10 @@ public static Tensor[] _while(Tensors input, object cond, object body, Shape[] o var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "While", name) { args = new object[] { input }, attrs = new Dictionary() { ["cond"] = cond, ["body"] = body, ["output_shapes"] = output_shapes, ["parallel_iterations"] = parallel_iterations } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } diff --git a/src/TensorFlowNET.Core/Operations/gen_io_ops.cs b/src/TensorFlowNET.Core/Operations/gen_io_ops.cs index 490cb1880..0b92ff360 100644 --- a/src/TensorFlowNET.Core/Operations/gen_io_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_io_ops.cs @@ -2,12 +2,50 @@ using Tensorflow.Eager; using Tensorflow.Contexts; +using Tensorflow.Exceptions; using static Tensorflow.Binding; namespace Tensorflow; -internal static class gen_io_ops +public static class gen_io_ops { + /// + /// A Reader that outputs fixed-length records from a file. + /// + /// + /// + /// Number of bytes in the header, defaults to 0. + /// + /// + /// + /// + /// Number of bytes in the record. + /// + /// + /// + /// + /// Number of bytes in the footer, defaults to 0. + /// + /// + /// + /// + /// Number of bytes to hop before each read. Default of 0 means using + /// record_bytes. + /// + /// + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// public static Tensor fixed_length_record_reader(int header_bytes = 0, int record_bytes = 0, int footer_bytes = 0, int hop_bytes = 0, string container = "", string shared_name = "", string? 
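The control-flow bindings above (Case, If, While, PartitionedCall, to_bool and the rest) get the same treatment, together with the new using Tensorflow.Exceptions; import at the top of gen_functional_ops.cs. The caller-visible effect is sketched below: a failing status now raises NotOkStatusException at the call site instead of being absorbed by the blanket catch. The calling code and the gen_functional_ops class qualifier are illustrative assumptions; only to_bool's signature and the exception type come from the patch:

// Illustrative caller; not part of the patch.
var input = tf.constant(1);
try
{
    Tensor flag = gen_functional_ops.to_bool(input);
    Console.WriteLine(flag.numpy());
}
catch (NotOkStatusException ex)
{
    // Previously the eager fast path swallowed such failures and silently fell back;
    // with this patch they surface here.
    Console.WriteLine($"ToBool failed: {ex.Message}");
}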
name = null) { var _ctx = tf.Context; @@ -15,9 +53,13 @@ public static Tensor fixed_length_record_reader(int header_bytes = 0, int record { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FixedLengthRecordReader", name, "header_bytes", header_bytes, "record_bytes", record_bytes, "footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container", container, "shared_name", shared_name)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FixedLengthRecordReader", name) { args = new object[] { }, attrs = new Dictionary() { ["header_bytes"] = header_bytes, ["record_bytes"] = record_bytes, ["footer_bytes"] = footer_bytes, ["hop_bytes"] = hop_bytes, ["container"] = container, ["shared_name"] = shared_name } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -29,8 +71,22 @@ public static Tensor fixed_length_record_reader(int header_bytes = 0, int record { } } + if (container is null) + { + container = ""; + } + if (shared_name is null) + { + shared_name = ""; + } Dictionary keywords = new(); - keywords["header_bytes"] = header_bytes; keywords["record_bytes"] = record_bytes; keywords["footer_bytes"] = footer_bytes; keywords["hop_bytes"] = hop_bytes; keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReader", name, keywords); + keywords["header_bytes"] = header_bytes; + keywords["record_bytes"] = record_bytes; + keywords["footer_bytes"] = footer_bytes; + keywords["hop_bytes"] = hop_bytes; + keywords["container"] = container; + keywords["shared_name"] = shared_name; + var _op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReader", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -51,6 +107,49 @@ public static Tensor fixed_length_record_reader_eager_fallback(int header_bytes, } return _result[0]; } + /// + /// A Reader that outputs fixed-length records from a file. + /// + /// + /// + /// Number of bytes in the header, defaults to 0. + /// + /// + /// + /// + /// Number of bytes in the record. + /// + /// + /// + /// + /// Number of bytes in the footer, defaults to 0. + /// + /// + /// + /// + /// Number of bytes to hop before each read. Default of 0 means using + /// record_bytes. + /// + /// + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// + /// + /// The type of encoding for the file. Currently ZLIB and GZIP + /// are supported. Defaults to none. + /// + /// + /// public static Tensor fixed_length_record_reader_v2(int header_bytes = 0, int record_bytes = 0, int footer_bytes = 0, int hop_bytes = 0, string container = "", string shared_name = "", string encoding = "", string? 
name = null) { var _ctx = tf.Context; @@ -58,9 +157,13 @@ public static Tensor fixed_length_record_reader_v2(int header_bytes = 0, int rec { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FixedLengthRecordReaderV2", name, "header_bytes", header_bytes, "record_bytes", record_bytes, "footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container", container, "shared_name", shared_name, "encoding", encoding)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FixedLengthRecordReaderV2", name) { args = new object[] { }, attrs = new Dictionary() { ["header_bytes"] = header_bytes, ["record_bytes"] = record_bytes, ["footer_bytes"] = footer_bytes, ["hop_bytes"] = hop_bytes, ["container"] = container, ["shared_name"] = shared_name, ["encoding"] = encoding } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -72,8 +175,27 @@ public static Tensor fixed_length_record_reader_v2(int header_bytes = 0, int rec { } } + if (container is null) + { + container = ""; + } + if (shared_name is null) + { + shared_name = ""; + } + if (encoding is null) + { + encoding = ""; + } Dictionary keywords = new(); - keywords["header_bytes"] = header_bytes; keywords["record_bytes"] = record_bytes; keywords["footer_bytes"] = footer_bytes; keywords["hop_bytes"] = hop_bytes; keywords["container"] = container; keywords["shared_name"] = shared_name; keywords["encoding"] = encoding; var _op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReaderV2", name, keywords); + keywords["header_bytes"] = header_bytes; + keywords["record_bytes"] = record_bytes; + keywords["footer_bytes"] = footer_bytes; + keywords["hop_bytes"] = hop_bytes; + keywords["container"] = container; + keywords["shared_name"] = shared_name; + keywords["encoding"] = encoding; + var _op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReaderV2", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -94,6 +216,28 @@ public static Tensor fixed_length_record_reader_v2_eager_fallback(int header_byt } return _result[0]; } + /// + /// A Reader that outputs the queued work as both the key and value. + /// + /// + /// + /// To use, enqueue strings in a Queue. ReaderRead will take the front + /// work string and output (work, work). + /// + /// + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// public static Tensor identity_reader(string container = "", string shared_name = "", string? 
name = null) { var _ctx = tf.Context; @@ -101,9 +245,13 @@ public static Tensor identity_reader(string container = "", string shared_name = { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityReader", name, "container", container, "shared_name", shared_name)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityReader", name) { args = new object[] { }, attrs = new Dictionary() { ["container"] = container, ["shared_name"] = shared_name } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -115,8 +263,18 @@ public static Tensor identity_reader(string container = "", string shared_name = { } } + if (container is null) + { + container = ""; + } + if (shared_name is null) + { + shared_name = ""; + } Dictionary keywords = new(); - keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("IdentityReader", name, keywords); + keywords["container"] = container; + keywords["shared_name"] = shared_name; + var _op = tf.OpDefLib._apply_op_helper("IdentityReader", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -137,6 +295,28 @@ public static Tensor identity_reader_eager_fallback(string container, string sha } return _result[0]; } + /// + /// A Reader that outputs the queued work as both the key and value. + /// + /// + /// + /// To use, enqueue strings in a Queue. ReaderRead will take the front + /// work string and output (work, work). + /// + /// + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// public static Tensor identity_reader_v2(string container = "", string shared_name = "", string? name = null) { var _ctx = tf.Context; @@ -144,9 +324,13 @@ public static Tensor identity_reader_v2(string container = "", string shared_nam { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityReaderV2", name, "container", container, "shared_name", shared_name)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityReaderV2", name) { args = new object[] { }, attrs = new Dictionary() { ["container"] = container, ["shared_name"] = shared_name } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -158,8 +342,18 @@ public static Tensor identity_reader_v2(string container = "", string shared_nam { } } + if (container is null) + { + container = ""; + } + if (shared_name is null) + { + shared_name = ""; + } Dictionary keywords = new(); - keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("IdentityReaderV2", name, keywords); + keywords["container"] = container; + keywords["shared_name"] = shared_name; + var _op = tf.OpDefLib._apply_op_helper("IdentityReaderV2", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -180,6 +374,18 @@ public static Tensor identity_reader_v2_eager_fallback(string container, string } return _result[0]; } + /// + /// Returns the set of files matching one or more glob patterns. 
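The regenerated reader bindings above also change how attributes reach the runtime: optional string attributes (container, shared_name and, for the V2 record reader, encoding) are normalized to "" before the op-def helper runs, and FastPathOpExecInfo is now filled through its args/attrs object initializer rather than the old positional "name, attr, value, ..." constructor. Below is a condensed, eager-only sketch of that pattern, modelled on IdentityReaderV2 from the hunk above (same using set as the earlier sketch; the attrs dictionary's generic arguments are assumed to be <string, object>, and error handling plus the graph path are elided):

// Eager-only sketch of the attr handling used by the regenerated readers.
public static Tensor identity_reader_v2_sketch(string container = "", string shared_name = "", string? name = null)
{
    // 1. Optional string attrs are normalized so _apply_op_helper never sees null.
    container ??= "";
    shared_name ??= "";

    // 2. Attrs travel through the object initializer instead of the old
    //    positional ("container", container, "shared_name", shared_name, ...) form.
    var info = new FastPathOpExecInfo(tf.Context, "IdentityReaderV2", name)
    {
        args = new object[] { },
        attrs = new Dictionary<string, object>
        {
            ["container"] = container,
            ["shared_name"] = shared_name,
        }
    };
    return tf.Runner.TFE_FastPathExecute(info)[0];
}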
+ /// + /// + /// + /// Note that this routine only supports wildcard characters in the + /// basename portion of the pattern, not in the directory portion. + /// Note also that the order of filenames returned is deterministic. + /// + /// + /// + /// public static Tensor matching_files(Tensor pattern, string? name = null) { var _ctx = tf.Context; @@ -187,9 +393,13 @@ public static Tensor matching_files(Tensor pattern, string? name = null) { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatchingFiles", name, pattern)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatchingFiles", name) { args = new object[] { pattern }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -224,51 +434,11 @@ public static Tensor matching_files_eager_fallback(Tensor pattern, string name, } return _result[0]; } - public static Operation merge_v2_checkpoints(Tensor checkpoint_prefixes, Tensor destination_prefix, bool delete_old_dirs = true, bool allow_missing_files = false, string? name = null) - { - var _ctx = tf.Context; - if (_ctx.executing_eagerly()) - { - try - { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MergeV2Checkpoints", name, checkpoint_prefixes, destination_prefix, "delete_old_dirs", delete_old_dirs, "allow_missing_files", allow_missing_files)); - return null; - } - catch (Exception) - { - } - try - { - return merge_v2_checkpoints_eager_fallback(checkpoint_prefixes, destination_prefix, delete_old_dirs: delete_old_dirs, allow_missing_files: allow_missing_files, name: name, ctx: _ctx); - } - catch (Exception) - { - } - } - Dictionary keywords = new(); - keywords["checkpoint_prefixes"] = checkpoint_prefixes; - keywords["destination_prefix"] = destination_prefix; - keywords["delete_old_dirs"] = delete_old_dirs; keywords["allow_missing_files"] = allow_missing_files; var _op = tf.OpDefLib._apply_op_helper("MergeV2Checkpoints", name, keywords); - var _result = _op.outputs; - if (_execute.must_record_gradient()) - { - object[] _attrs = new object[] { "delete_old_dirs", _op._get_attr_bool("delete_old_dirs"), "allow_missing_files", _op._get_attr_bool("allow_missing_files") }; - _execute.record_gradient("MergeV2Checkpoints", _op.inputs, _attrs, _result); - } - return _op; - } - - public static Tensor merge_v2_checkpoints_eager_fallback(Tensor checkpoint_prefixes, Tensor destination_prefix, bool delete_old_dirs, bool allow_missing_files, string name, Context ctx) - { - Tensor[] _inputs_flat = new Tensor[] { checkpoint_prefixes, destination_prefix }; - object[] _attrs = new object[] { "delete_old_dirs", delete_old_dirs, "allow_missing_files", allow_missing_files }; - var _result = _execute.execute("MergeV2Checkpoints", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); - if (_execute.must_record_gradient()) - { - _execute.record_gradient("MergeV2Checkpoints", _inputs_flat, _attrs, _result); - } - return null; - } + /// + /// Reads and outputs the entire contents of the input filename. + /// + /// + /// public static Tensor read_file(Tensor filename, string? name = null) { var _ctx = tf.Context; @@ -276,9 +446,13 @@ public static Tensor read_file(Tensor filename, string? 
name = null) { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReadFile", name, filename)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReadFile", name) { args = new object[] { filename }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -313,6 +487,17 @@ public static Tensor read_file_eager_fallback(Tensor filename, string name, Cont } return _result[0]; } + /// + /// Returns the number of records this Reader has produced. + /// + /// + /// + /// This is the same as the number of ReaderRead executions that have + /// succeeded. + /// + /// + /// + /// public static Tensor reader_num_records_produced(Tensor reader_handle, string? name = null) { var _ctx = tf.Context; @@ -336,6 +521,17 @@ public static Tensor reader_num_records_produced_eager_fallback(Tensor reader_ha { throw new RuntimeError($"reader_num_records_produced op does not support eager execution. Arg 'reader_handle' is a ref."); } + /// + /// Returns the number of records this Reader has produced. + /// + /// + /// + /// This is the same as the number of ReaderRead executions that have + /// succeeded. + /// + /// + /// + /// public static Tensor reader_num_records_produced_v2(Tensor reader_handle, string? name = null) { var _ctx = tf.Context; @@ -343,9 +539,13 @@ public static Tensor reader_num_records_produced_v2(Tensor reader_handle, string { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderNumRecordsProducedV2", name, reader_handle)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderNumRecordsProducedV2", name) { args = new object[] { reader_handle }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -380,6 +580,11 @@ public static Tensor reader_num_records_produced_v2_eager_fallback(Tensor reader } return _result[0]; } + /// + /// Returns the number of work units this Reader has finished processing. + /// + /// + /// public static Tensor reader_num_work_units_completed(Tensor reader_handle, string? name = null) { var _ctx = tf.Context; @@ -403,6 +608,11 @@ public static Tensor reader_num_work_units_completed_eager_fallback(Tensor reade { throw new RuntimeError($"reader_num_work_units_completed op does not support eager execution. Arg 'reader_handle' is a ref."); } + /// + /// Returns the number of work units this Reader has finished processing. + /// + /// + /// public static Tensor reader_num_work_units_completed_v2(Tensor reader_handle, string? name = null) { var _ctx = tf.Context; @@ -410,9 +620,13 @@ public static Tensor reader_num_work_units_completed_v2(Tensor reader_handle, st { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderNumWorkUnitsCompletedV2", name, reader_handle)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderNumWorkUnitsCompletedV2", name) { args = new object[] { reader_handle }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -447,6 +661,19 @@ public static Tensor reader_num_work_units_completed_v2_eager_fallback(Tensor re } return _result[0]; } + /// + /// Returns the next record (key, value pair) produced by a Reader. 
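// --- Illustration (not part of the generated file or of this patch) -----------------
// Hedged usage sketch: ReaderRead / ReaderReadV2 produce a (key, value) pair, which is
// why reader_read and reader_read_v2 (defined just below) return Tensor[] rather than a
// single Tensor. The handles are assumed to come from the reader constructors and queue
// ops elsewhere in these bindings; only the call shape is taken from the code above.
static (Tensor key, Tensor value) ReadOneRecord(Tensor reader_handle, Tensor queue_handle)
{
    var result = reader_read_v2(reader_handle, queue_handle);
    return (result[0], result[1]);  // key (e.g. the filename), value (the record contents)
}
// -------------------------------------------------------------------------------------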
+ /// + /// + /// + /// Will dequeue from the input queue if necessary (e.g. when the + /// Reader needs to start reading from a new file since it has finished + /// with the previous file). + /// + /// + /// + /// + /// public static Tensor[] reader_read(Tensor reader_handle, Tensor queue_handle, string? name = null) { var _ctx = tf.Context; @@ -471,6 +698,21 @@ public static Tensor[] reader_read_eager_fallback(Tensor reader_handle, Tensor q { throw new RuntimeError($"reader_read op does not support eager execution. Arg 'reader_handle' is a ref."); } + /// + /// Returns up to `num_records` (key, value) pairs produced by a Reader. + /// + /// + /// + /// Will dequeue from the input queue if necessary (e.g. when the + /// Reader needs to start reading from a new file since it has finished + /// with the previous file). + /// It may return less than `num_records` even before the last batch. + /// + /// + /// + /// + /// + /// public static Tensor[] reader_read_up_to(Tensor reader_handle, Tensor queue_handle, Tensor num_records, string? name = null) { var _ctx = tf.Context; @@ -496,6 +738,21 @@ public static Tensor[] reader_read_up_to_eager_fallback(Tensor reader_handle, Te { throw new RuntimeError($"reader_read_up_to op does not support eager execution. Arg 'reader_handle' is a ref."); } + /// + /// Returns up to `num_records` (key, value) pairs produced by a Reader. + /// + /// + /// + /// Will dequeue from the input queue if necessary (e.g. when the + /// Reader needs to start reading from a new file since it has finished + /// with the previous file). + /// It may return less than `num_records` even before the last batch. + /// + /// + /// + /// + /// + /// public static Tensor[] reader_read_up_to_v2(Tensor reader_handle, Tensor queue_handle, Tensor num_records, string? name = null) { var _ctx = tf.Context; @@ -503,9 +760,13 @@ public static Tensor[] reader_read_up_to_v2(Tensor reader_handle, Tensor queue_h { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderReadUpToV2", name, reader_handle, queue_handle, num_records)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderReadUpToV2", name) { args = new object[] { reader_handle, queue_handle, num_records }, attrs = new Dictionary() { } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -542,6 +803,19 @@ public static Tensor[] reader_read_up_to_v2_eager_fallback(Tensor reader_handle, } return _result; } + /// + /// Returns the next record (key, value pair) produced by a Reader. + /// + /// + /// + /// Will dequeue from the input queue if necessary (e.g. when the + /// Reader needs to start reading from a new file since it has finished + /// with the previous file). + /// + /// + /// + /// + /// public static Tensor[] reader_read_v2(Tensor reader_handle, Tensor queue_handle, string? 
name = null) { var _ctx = tf.Context; @@ -549,9 +823,13 @@ public static Tensor[] reader_read_v2(Tensor reader_handle, Tensor queue_handle, { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderReadV2", name, reader_handle, queue_handle)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderReadV2", name) { args = new object[] { reader_handle, queue_handle }, attrs = new Dictionary() { } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -587,6 +865,11 @@ public static Tensor[] reader_read_v2_eager_fallback(Tensor reader_handle, Tenso } return _result; } + /// + /// Restore a Reader to its initial clean state. + /// + /// + /// public static Operation reader_reset(Tensor reader_handle, string? name = null) { var _ctx = tf.Context; @@ -606,10 +889,15 @@ public static Operation reader_reset(Tensor reader_handle, string? name = null) return _op; } - public static Tensor reader_reset_eager_fallback(Tensor reader_handle, string name, Context ctx) + public static Operation reader_reset_eager_fallback(Tensor reader_handle, string name, Context ctx) { throw new RuntimeError($"reader_reset op does not support eager execution. Arg 'reader_handle' is a ref."); } + /// + /// Restore a Reader to its initial clean state. + /// + /// + /// public static Operation reader_reset_v2(Tensor reader_handle, string? name = null) { var _ctx = tf.Context; @@ -617,9 +905,13 @@ public static Operation reader_reset_v2(Tensor reader_handle, string? name = nul { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderResetV2", name, reader_handle)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderResetV2", name) { args = new object[] { reader_handle }, attrs = new Dictionary() { } }); return null; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -643,7 +935,7 @@ public static Operation reader_reset_v2(Tensor reader_handle, string? name = nul return _op; } - public static Tensor reader_reset_v2_eager_fallback(Tensor reader_handle, string name, Context ctx) + public static Operation reader_reset_v2_eager_fallback(Tensor reader_handle, string name, Context ctx) { Tensor[] _inputs_flat = new Tensor[] { reader_handle }; object[] _attrs = new object[] { }; @@ -654,6 +946,18 @@ public static Tensor reader_reset_v2_eager_fallback(Tensor reader_handle, string } return null; } + /// + /// Restore a reader to a previously saved state. + /// + /// + /// + /// Not all Readers support being restored, so this can produce an + /// Unimplemented error. + /// + /// + /// + /// + /// public static Operation reader_restore_state(Tensor reader_handle, Tensor state, string? name = null) { var _ctx = tf.Context; @@ -674,10 +978,22 @@ public static Operation reader_restore_state(Tensor reader_handle, Tensor state, return _op; } - public static Tensor reader_restore_state_eager_fallback(Tensor reader_handle, Tensor state, string name, Context ctx) + public static Operation reader_restore_state_eager_fallback(Tensor reader_handle, Tensor state, string name, Context ctx) { throw new RuntimeError($"reader_restore_state op does not support eager execution. Arg 'reader_handle' is a ref."); } + /// + /// Restore a reader to a previously saved state. + /// + /// + /// + /// Not all Readers support being restored, so this can produce an + /// Unimplemented error. 
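// --- Illustration (not part of the generated file or of this patch) -----------------
// Hedged sketch: the serialize/restore pair checkpoints a reader's position.
// reader_serialize_state_v2 (further below) returns a string Tensor holding the opaque
// state, and reader_restore_state_v2 (defined just below) re-applies it and yields an
// Operation with no outputs, matching the corrected eager-fallback return types in this
// patch. The reader handle is assumed to come from one of the V2 reader constructors
// later in this file.
static Operation SaveAndRestoreReaderState(Tensor reader_handle)
{
    Tensor state = reader_serialize_state_v2(reader_handle);
    return reader_restore_state_v2(reader_handle, state);
}
// -------------------------------------------------------------------------------------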
+ /// + /// + /// + /// + /// public static Operation reader_restore_state_v2(Tensor reader_handle, Tensor state, string? name = null) { var _ctx = tf.Context; @@ -685,9 +1001,13 @@ public static Operation reader_restore_state_v2(Tensor reader_handle, Tensor sta { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderRestoreStateV2", name, reader_handle, state)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderRestoreStateV2", name) { args = new object[] { reader_handle, state }, attrs = new Dictionary() { } }); return null; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -712,7 +1032,7 @@ public static Operation reader_restore_state_v2(Tensor reader_handle, Tensor sta return _op; } - public static Tensor reader_restore_state_v2_eager_fallback(Tensor reader_handle, Tensor state, string name, Context ctx) + public static Operation reader_restore_state_v2_eager_fallback(Tensor reader_handle, Tensor state, string name, Context ctx) { Tensor[] _inputs_flat = new Tensor[] { reader_handle, state }; object[] _attrs = new object[] { }; @@ -723,6 +1043,17 @@ public static Tensor reader_restore_state_v2_eager_fallback(Tensor reader_handle } return null; } + /// + /// Produce a string tensor that encodes the state of a Reader. + /// + /// + /// + /// Not all Readers support being serialized, so this can produce an + /// Unimplemented error. + /// + /// + /// + /// public static Tensor reader_serialize_state(Tensor reader_handle, string? name = null) { var _ctx = tf.Context; @@ -746,6 +1077,17 @@ public static Tensor reader_serialize_state_eager_fallback(Tensor reader_handle, { throw new RuntimeError($"reader_serialize_state op does not support eager execution. Arg 'reader_handle' is a ref."); } + /// + /// Produce a string tensor that encodes the state of a Reader. + /// + /// + /// + /// Not all Readers support being serialized, so this can produce an + /// Unimplemented error. + /// + /// + /// + /// public static Tensor reader_serialize_state_v2(Tensor reader_handle, string? name = null) { var _ctx = tf.Context; @@ -753,9 +1095,13 @@ public static Tensor reader_serialize_state_v2(Tensor reader_handle, string? nam { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderSerializeStateV2", name, reader_handle)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderSerializeStateV2", name) { args = new object[] { reader_handle }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -790,6 +1136,43 @@ public static Tensor reader_serialize_state_v2_eager_fallback(Tensor reader_hand } return _result[0]; } + /// + /// Restores a tensor from checkpoint files. + /// + /// + /// + /// Reads a tensor stored in one or several files. If there are several files (for + /// instance because a tensor was saved as slices), `file_pattern` may contain + /// wildcard symbols (`*` and `?`) in the filename portion only, not in the + /// directory portion. + /// + /// If a `file_pattern` matches several files, `preferred_shard` can be used to hint + /// in which file the requested tensor is likely to be found. This op will first + /// open the file at index `preferred_shard` in the list of matching files and try + /// to restore tensors from that file. 
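// --- Illustration (not part of the generated file or of this patch) -----------------
// Hedged sketch: RestoreV2 emits one tensor per requested name, which is why this patch
// changes restore_v2 (defined further below) to return Tensor[] instead of a single
// Tensor. The prefix, tensor_names and shape_and_slices tensors are assumed to be
// prepared by the caller; the two TF_FLOAT dtypes are illustrative values only and must
// match what is stored in the checkpoint.
static Tensor[] RestoreTwoTensors(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices)
{
    var dtypes = new[] { TF_DataType.TF_FLOAT, TF_DataType.TF_FLOAT };
    return restore_v2(prefix, tensor_names, shape_and_slices, dtypes);  // one output per name
}
// -------------------------------------------------------------------------------------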
Only if some tensors or tensor slices are + /// not found in that first file, then the Op opens all the files. Setting + /// `preferred_shard` to match the value passed as the `shard` input + /// of a matching `Save` Op may speed up Restore. This attribute only affects + /// performance, not correctness. The default value -1 means files are processed in + /// order. + /// + /// See also `RestoreSlice`. + /// + /// + /// + /// + /// + /// + /// The type of the tensor to be restored. + /// + /// + /// + /// + /// Index of file to open first if multiple files match + /// `file_pattern`. + /// + /// + /// public static Tensor restore(Tensor file_pattern, Tensor tensor_name, TF_DataType dt, int preferred_shard = -1, string? name = null) { var _ctx = tf.Context; @@ -797,9 +1180,13 @@ public static Tensor restore(Tensor file_pattern, Tensor tensor_name, TF_DataTyp { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Restore", name, file_pattern, tensor_name, "dt", dt, "preferred_shard", preferred_shard)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Restore", name) { args = new object[] { file_pattern, tensor_name }, attrs = new Dictionary() { ["dt"] = dt, ["preferred_shard"] = preferred_shard } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -814,7 +1201,9 @@ public static Tensor restore(Tensor file_pattern, Tensor tensor_name, TF_DataTyp Dictionary keywords = new(); keywords["file_pattern"] = file_pattern; keywords["tensor_name"] = tensor_name; - keywords["dt"] = dt; keywords["preferred_shard"] = preferred_shard; var _op = tf.OpDefLib._apply_op_helper("Restore", name, keywords); + keywords["dt"] = dt; + keywords["preferred_shard"] = preferred_shard; + var _op = tf.OpDefLib._apply_op_helper("Restore", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -835,6 +1224,34 @@ public static Tensor restore_eager_fallback(Tensor file_pattern, Tensor tensor_n } return _result[0]; } + /// + /// Restores a tensor from checkpoint files. + /// + /// + /// + /// This is like `Restore` except that restored tensor can be listed as filling + /// only a slice of a larger tensor. `shape_and_slice` specifies the shape of the + /// larger tensor and the slice that the restored tensor covers. + /// + /// The `shape_and_slice` input has the same format as the + /// elements of the `shapes_and_slices` input of the `SaveSlices` op. + /// + /// + /// + /// + /// + /// + /// + /// The type of the tensor to be restored. + /// + /// + /// + /// + /// Index of file to open first if multiple files match + /// `file_pattern`. See the documentation for `Restore`. + /// + /// + /// public static Tensor restore_slice(Tensor file_pattern, Tensor tensor_name, Tensor shape_and_slice, TF_DataType dt, int preferred_shard = -1, string? 
name = null) { var _ctx = tf.Context; @@ -842,9 +1259,13 @@ public static Tensor restore_slice(Tensor file_pattern, Tensor tensor_name, Tens { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RestoreSlice", name, file_pattern, tensor_name, shape_and_slice, "dt", dt, "preferred_shard", preferred_shard)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RestoreSlice", name) { args = new object[] { file_pattern, tensor_name, shape_and_slice }, attrs = new Dictionary() { ["dt"] = dt, ["preferred_shard"] = preferred_shard } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -860,7 +1281,9 @@ public static Tensor restore_slice(Tensor file_pattern, Tensor tensor_name, Tens keywords["file_pattern"] = file_pattern; keywords["tensor_name"] = tensor_name; keywords["shape_and_slice"] = shape_and_slice; - keywords["dt"] = dt; keywords["preferred_shard"] = preferred_shard; var _op = tf.OpDefLib._apply_op_helper("RestoreSlice", name, keywords); + keywords["dt"] = dt; + keywords["preferred_shard"] = preferred_shard; + var _op = tf.OpDefLib._apply_op_helper("RestoreSlice", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -881,15 +1304,49 @@ public static Tensor restore_slice_eager_fallback(Tensor file_pattern, Tensor te } return _result[0]; } - public static Tensor restore_v2(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string? name = null) + /// + /// Restores tensors from a V2 checkpoint. + /// + /// + /// + /// For backward compatibility with the V1 format, this Op currently allows + /// restoring from a V1 checkpoint as well: + /// - This Op first attempts to find the V2 index file pointed to by "prefix", and + /// if found proceed to read it as a V2 checkpoint; + /// - Otherwise the V1 read path is invoked. + /// Relying on this behavior is not recommended, as the ability to fall back to read + /// V1 might be deprecated and eventually removed. + /// + /// By default, restores the named tensors in full. If the caller wishes to restore + /// specific slices of stored tensors, "shape_and_slices" should be non-empty + /// strings and correspondingly well-formed. + /// + /// Callers must ensure all the named tensors are indeed stored in the checkpoint. + /// + /// + /// + /// + /// + /// + /// + /// shape {N}. The list of expected dtype for the tensors. Must match + /// those stored in the checkpoint. + /// + /// + /// + public static Tensor[] restore_v2(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string? 
name = null) { var _ctx = tf.Context; if (_ctx.executing_eagerly()) { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RestoreV2", name, prefix, tensor_names, shape_and_slices, "dtypes", dtypes)); - return _fast_path_result[0]; + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RestoreV2", name) { args = new object[] { prefix, tensor_names, shape_and_slices }, attrs = new Dictionary() { ["dtypes"] = dtypes } }); + return _fast_path_result; + } + catch (NotOkStatusException ex) + { + throw ex; } catch (Exception) { @@ -906,43 +1363,63 @@ public static Tensor restore_v2(Tensor prefix, Tensor tensor_names, Tensor shape keywords["prefix"] = prefix; keywords["tensor_names"] = tensor_names; keywords["shape_and_slices"] = shape_and_slices; - keywords["dtypes"] = dtypes; var _op = tf.OpDefLib._apply_op_helper("RestoreV2", name, keywords); + keywords["dtypes"] = dtypes; + var _op = tf.OpDefLib._apply_op_helper("RestoreV2", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { object[] _attrs = new object[] { "dtypes", _op.get_attr("dtypes") }; _execute.record_gradient("RestoreV2", _op.inputs, _attrs, _result); } - return _result[0]; + return _result; } - public static Tensor restore_v2_eager_fallback(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string name, Context ctx) + public static Tensor[] restore_v2_eager_fallback(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string name, Context ctx) { Tensor[] _inputs_flat = new Tensor[] { prefix, tensor_names, shape_and_slices }; - object[] _attrs = new object[] { "dtypes", dtypes }; + object[] _attrs = new object[] { }; var _result = _execute.execute("RestoreV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); if (_execute.must_record_gradient()) { _execute.record_gradient("RestoreV2", _inputs_flat, _attrs, _result); } - return _result[0]; + return _result; } - public static Operation save(Tensor filename, Tensor tensor_names, Tensor data, TF_DataType[] T, string? name = null) + /// + /// Saves the input tensors to disk. + /// + /// + /// + /// The size of `tensor_names` must match the number of tensors in `data`. `data[i]` + /// is written to `filename` with name `tensor_names[i]`. + /// + /// See also `SaveSlices`. + /// + /// + /// + /// + /// + /// + public static Operation save(Tensor filename, Tensor tensor_names, Tensors data, string? 
name = null) { var _ctx = tf.Context; if (_ctx.executing_eagerly()) { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Save", name, filename, tensor_names, data, "T", T)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Save", name) { args = new object[] { filename, tensor_names, data }, attrs = new Dictionary() { } }); return null; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } try { - return save_eager_fallback(filename, tensor_names, data, T: T, name: name, ctx: _ctx); + return save_eager_fallback(filename, tensor_names, data, name: name, ctx: _ctx); } catch (Exception) { @@ -952,7 +1429,7 @@ public static Operation save(Tensor filename, Tensor tensor_names, Tensor data, keywords["filename"] = filename; keywords["tensor_names"] = tensor_names; keywords["data"] = data; - keywords["T"] = T; var _op = tf.OpDefLib._apply_op_helper("Save", name, keywords); + var _op = tf.OpDefLib._apply_op_helper("Save", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -962,10 +1439,10 @@ public static Operation save(Tensor filename, Tensor tensor_names, Tensor data, return _op; } - public static Tensor save_eager_fallback(Tensor filename, Tensor tensor_names, Tensor data, TF_DataType[] T, string name, Context ctx) + public static Operation save_eager_fallback(Tensor filename, Tensor tensor_names, Tensor data, string name, Context ctx) { Tensor[] _inputs_flat = new Tensor[] { filename, tensor_names, data }; - object[] _attrs = new object[] { "T", T }; + object[] _attrs = new object[] { }; var _result = _execute.execute("Save", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); if (_execute.must_record_gradient()) { @@ -973,22 +1450,59 @@ public static Tensor save_eager_fallback(Tensor filename, Tensor tensor_names, T } return null; } - public static Operation save_slices(Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensor data, TF_DataType[] T, string? name = null) + /// + /// Saves input tensors slices to disk. + /// + /// + /// + /// This is like `Save` except that tensors can be listed in the saved file as being + /// a slice of a larger tensor. `shapes_and_slices` specifies the shape of the + /// larger tensor and the slice that this tensor covers. `shapes_and_slices` must + /// have as many elements as `tensor_names`. + /// + /// Elements of the `shapes_and_slices` input must either be: + /// + /// * The empty string, in which case the corresponding tensor is + /// saved normally. + /// * A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the + /// `dimI` are the dimensions of the larger tensor and `slice-spec` + /// specifies what part is covered by the tensor to save. + /// + /// `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1` + /// where each `sliceI` is either: + /// + /// * The string `-` meaning that the slice covers all indices of this dimension + /// * `start,length` where `start` and `length` are integers. In that + /// case the slice covers `length` indices starting at `start`. + /// + /// See also `Save`. + /// + /// + /// + /// + /// + /// + /// + public static Operation save_slices(Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensors data, string? 
name = null) { var _ctx = tf.Context; if (_ctx.executing_eagerly()) { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SaveSlices", name, filename, tensor_names, shapes_and_slices, data, "T", T)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SaveSlices", name) { args = new object[] { filename, tensor_names, shapes_and_slices, data }, attrs = new Dictionary() { } }); return null; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } try { - return save_slices_eager_fallback(filename, tensor_names, shapes_and_slices, data, T: T, name: name, ctx: _ctx); + return save_slices_eager_fallback(filename, tensor_names, shapes_and_slices, data, name: name, ctx: _ctx); } catch (Exception) { @@ -999,7 +1513,7 @@ public static Operation save_slices(Tensor filename, Tensor tensor_names, Tensor keywords["tensor_names"] = tensor_names; keywords["shapes_and_slices"] = shapes_and_slices; keywords["data"] = data; - keywords["T"] = T; var _op = tf.OpDefLib._apply_op_helper("SaveSlices", name, keywords); + var _op = tf.OpDefLib._apply_op_helper("SaveSlices", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -1009,10 +1523,10 @@ public static Operation save_slices(Tensor filename, Tensor tensor_names, Tensor return _op; } - public static Tensor save_slices_eager_fallback(Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensor data, TF_DataType[] T, string name, Context ctx) + public static Operation save_slices_eager_fallback(Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensor data, string name, Context ctx) { Tensor[] _inputs_flat = new Tensor[] { filename, tensor_names, shapes_and_slices, data }; - object[] _attrs = new object[] { "T", T }; + object[] _attrs = new object[] { }; var _result = _execute.execute("SaveSlices", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); if (_execute.must_record_gradient()) { @@ -1020,22 +1534,41 @@ public static Tensor save_slices_eager_fallback(Tensor filename, Tensor tensor_n } return null; } - public static Operation save_v2(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensor tensors, TF_DataType[] dtypes, string? name = null) + /// + /// Saves tensors in V2 checkpoint format. + /// + /// + /// + /// By default, saves the named tensors in full. If the caller wishes to save + /// specific slices of full tensors, "shape_and_slices" should be non-empty strings + /// and correspondingly well-formed. + /// + /// + /// + /// + /// + /// + /// + public static Operation save_v2(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensors tensors, string? 
name = null) { var _ctx = tf.Context; if (_ctx.executing_eagerly()) { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SaveV2", name, prefix, tensor_names, shape_and_slices, tensors, "dtypes", dtypes)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SaveV2", name) { args = new object[] { prefix, tensor_names, shape_and_slices, tensors }, attrs = new Dictionary() { } }); return null; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } try { - return save_v2_eager_fallback(prefix, tensor_names, shape_and_slices, tensors, dtypes: dtypes, name: name, ctx: _ctx); + return save_v2_eager_fallback(prefix, tensor_names, shape_and_slices, tensors, name: name, ctx: _ctx); } catch (Exception) { @@ -1046,7 +1579,7 @@ public static Operation save_v2(Tensor prefix, Tensor tensor_names, Tensor shape keywords["tensor_names"] = tensor_names; keywords["shape_and_slices"] = shape_and_slices; keywords["tensors"] = tensors; - keywords["dtypes"] = dtypes; var _op = tf.OpDefLib._apply_op_helper("SaveV2", name, keywords); + var _op = tf.OpDefLib._apply_op_helper("SaveV2", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -1056,10 +1589,10 @@ public static Operation save_v2(Tensor prefix, Tensor tensor_names, Tensor shape return _op; } - public static Tensor save_v2_eager_fallback(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensor tensors, TF_DataType[] dtypes, string name, Context ctx) + public static Operation save_v2_eager_fallback(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensor tensors, string name, Context ctx) { Tensor[] _inputs_flat = new Tensor[] { prefix, tensor_names, shape_and_slices, tensors }; - object[] _attrs = new object[] { "dtypes", dtypes }; + object[] _attrs = new object[] { }; var _result = _execute.execute("SaveV2", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); if (_execute.must_record_gradient()) { @@ -1067,6 +1600,18 @@ public static Tensor save_v2_eager_fallback(Tensor prefix, Tensor tensor_names, } return null; } + /// + /// Generate a sharded filename. The filename is printf formatted as + /// + /// + /// + /// %s-%05d-of-%05d, basename, shard, num_shards. + /// + /// + /// + /// + /// + /// public static Tensor sharded_filename(Tensor basename, Tensor shard, Tensor num_shards, string? name = null) { var _ctx = tf.Context; @@ -1074,9 +1619,13 @@ public static Tensor sharded_filename(Tensor basename, Tensor shard, Tensor num_ { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShardedFilename", name, basename, shard, num_shards)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShardedFilename", name) { args = new object[] { basename, shard, num_shards }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1113,6 +1662,12 @@ public static Tensor sharded_filename_eager_fallback(Tensor basename, Tensor sha } return _result[0]; } + /// + /// Generate a glob pattern matching all sharded file names. + /// + /// + /// + /// public static Tensor sharded_filespec(Tensor basename, Tensor num_shards, string? 
name = null) { var _ctx = tf.Context; @@ -1120,9 +1675,13 @@ public static Tensor sharded_filespec(Tensor basename, Tensor num_shards, string { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShardedFilespec", name, basename, num_shards)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShardedFilespec", name) { args = new object[] { basename, num_shards }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1158,6 +1717,27 @@ public static Tensor sharded_filespec_eager_fallback(Tensor basename, Tensor num } return _result[0]; } + /// + /// A Reader that outputs the lines of a file delimited by '\n'. + /// + /// + /// + /// Number of lines to skip from the beginning of every file. + /// + /// + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// public static Tensor text_line_reader(int skip_header_lines = 0, string container = "", string shared_name = "", string? name = null) { var _ctx = tf.Context; @@ -1165,9 +1745,13 @@ public static Tensor text_line_reader(int skip_header_lines = 0, string containe { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TextLineReader", name, "skip_header_lines", skip_header_lines, "container", container, "shared_name", shared_name)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TextLineReader", name) { args = new object[] { }, attrs = new Dictionary() { ["skip_header_lines"] = skip_header_lines, ["container"] = container, ["shared_name"] = shared_name } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1179,8 +1763,19 @@ public static Tensor text_line_reader(int skip_header_lines = 0, string containe { } } + if (container is null) + { + container = ""; + } + if (shared_name is null) + { + shared_name = ""; + } Dictionary keywords = new(); - keywords["skip_header_lines"] = skip_header_lines; keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("TextLineReader", name, keywords); + keywords["skip_header_lines"] = skip_header_lines; + keywords["container"] = container; + keywords["shared_name"] = shared_name; + var _op = tf.OpDefLib._apply_op_helper("TextLineReader", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -1201,6 +1796,27 @@ public static Tensor text_line_reader_eager_fallback(int skip_header_lines, stri } return _result[0]; } + /// + /// A Reader that outputs the lines of a file delimited by '\n'. + /// + /// + /// + /// Number of lines to skip from the beginning of every file. + /// + /// + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// public static Tensor text_line_reader_v2(int skip_header_lines = 0, string container = "", string shared_name = "", string? 
name = null) { var _ctx = tf.Context; @@ -1208,9 +1824,13 @@ public static Tensor text_line_reader_v2(int skip_header_lines = 0, string conta { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TextLineReaderV2", name, "skip_header_lines", skip_header_lines, "container", container, "shared_name", shared_name)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TextLineReaderV2", name) { args = new object[] { }, attrs = new Dictionary() { ["skip_header_lines"] = skip_header_lines, ["container"] = container, ["shared_name"] = shared_name } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1222,8 +1842,19 @@ public static Tensor text_line_reader_v2(int skip_header_lines = 0, string conta { } } + if (container is null) + { + container = ""; + } + if (shared_name is null) + { + shared_name = ""; + } Dictionary keywords = new(); - keywords["skip_header_lines"] = skip_header_lines; keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("TextLineReaderV2", name, keywords); + keywords["skip_header_lines"] = skip_header_lines; + keywords["container"] = container; + keywords["shared_name"] = shared_name; + var _op = tf.OpDefLib._apply_op_helper("TextLineReaderV2", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -1244,6 +1875,28 @@ public static Tensor text_line_reader_v2_eager_fallback(int skip_header_lines, s } return _result[0]; } + /// + /// A Reader that outputs the entire contents of a file as a value. + /// + /// + /// + /// To use, enqueue filenames in a Queue. The output of ReaderRead will + /// be a filename (key) and the contents of that file (value). + /// + /// + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// public static Tensor whole_file_reader(string container = "", string shared_name = "", string? 
name = null) { var _ctx = tf.Context; @@ -1251,9 +1904,13 @@ public static Tensor whole_file_reader(string container = "", string shared_name { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WholeFileReader", name, "container", container, "shared_name", shared_name)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WholeFileReader", name) { args = new object[] { }, attrs = new Dictionary() { ["container"] = container, ["shared_name"] = shared_name } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1265,8 +1922,18 @@ public static Tensor whole_file_reader(string container = "", string shared_name { } } + if (container is null) + { + container = ""; + } + if (shared_name is null) + { + shared_name = ""; + } Dictionary keywords = new(); - keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("WholeFileReader", name, keywords); + keywords["container"] = container; + keywords["shared_name"] = shared_name; + var _op = tf.OpDefLib._apply_op_helper("WholeFileReader", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -1287,6 +1954,28 @@ public static Tensor whole_file_reader_eager_fallback(string container, string s } return _result[0]; } + /// + /// A Reader that outputs the entire contents of a file as a value. + /// + /// + /// + /// To use, enqueue filenames in a Queue. The output of ReaderRead will + /// be a filename (key) and the contents of that file (value). + /// + /// + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// public static Tensor whole_file_reader_v2(string container = "", string shared_name = "", string? name = null) { var _ctx = tf.Context; @@ -1294,9 +1983,13 @@ public static Tensor whole_file_reader_v2(string container = "", string shared_n { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WholeFileReaderV2", name, "container", container, "shared_name", shared_name)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WholeFileReaderV2", name) { args = new object[] { }, attrs = new Dictionary() { ["container"] = container, ["shared_name"] = shared_name } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1308,8 +2001,18 @@ public static Tensor whole_file_reader_v2(string container = "", string shared_n { } } + if (container is null) + { + container = ""; + } + if (shared_name is null) + { + shared_name = ""; + } Dictionary keywords = new(); - keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("WholeFileReaderV2", name, keywords); + keywords["container"] = container; + keywords["shared_name"] = shared_name; + var _op = tf.OpDefLib._apply_op_helper("WholeFileReaderV2", name, keywords); var _result = _op.outputs; if (_execute.must_record_gradient()) { @@ -1330,6 +2033,17 @@ public static Tensor whole_file_reader_v2_eager_fallback(string container, strin } return _result[0]; } + /// + /// Writes `contents` to the file at input `filename`. 
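// --- Illustration (not part of the generated file or of this patch) -----------------
// Hedged usage sketch: write_file (defined just below) pairs with read_file earlier in
// this file for a simple round trip. tf.constant is assumed to be available from the
// binding for building the scalar string tensors; only the op signatures themselves are
// taken from the generated code.
static Tensor WriteThenRead(string path_on_disk)
{
    Tensor path = tf.constant(path_on_disk);
    write_file(path, tf.constant("hello, reader"));  // Operation in graph mode, null on the eager fast path
    return read_file(path);                          // scalar string tensor with the file contents
}
// -------------------------------------------------------------------------------------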
+ /// + /// + /// + /// Creates the file and recursively creates directory if it does not exist. + /// + /// + /// + /// + /// public static Operation write_file(Tensor filename, Tensor contents, string? name = null) { var _ctx = tf.Context; @@ -1337,9 +2051,13 @@ public static Operation write_file(Tensor filename, Tensor contents, string? nam { try { - var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WriteFile", name, filename, contents)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WriteFile", name) { args = new object[] { filename, contents }, attrs = new Dictionary() { } }); return null; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1364,7 +2082,7 @@ public static Operation write_file(Tensor filename, Tensor contents, string? nam return _op; } - public static Tensor write_file_eager_fallback(Tensor filename, Tensor contents, string name, Context ctx) + public static Operation write_file_eager_fallback(Tensor filename, Tensor contents, string name, Context ctx) { Tensor[] _inputs_flat = new Tensor[] { filename, contents }; object[] _attrs = new object[] { }; diff --git a/src/TensorFlowNET.Core/Operations/gen_list_ops.cs b/src/TensorFlowNET.Core/Operations/gen_list_ops.cs index e72539866..59c783b24 100644 --- a/src/TensorFlowNET.Core/Operations/gen_list_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_list_ops.cs @@ -2,6 +2,7 @@ using Tensorflow.Eager; using Tensorflow.Contexts; +using Tensorflow.Exceptions; using static Tensorflow.Binding; namespace Tensorflow; @@ -35,6 +36,10 @@ public static Tensor empty_tensor_list(Tensor element_shape, Tensor max_num_elem var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EmptyTensorList", name) { args = new object[] { element_shape, max_num_elements }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -98,6 +103,10 @@ public static Tensor[] tensor_list_concat(Tensor input_handle, TF_DataType eleme var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListConcat", name) { args = new object[] { input_handle }, attrs = new Dictionary() { ["element_dtype"] = element_dtype, ["element_shape"] = element_shape } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -151,6 +160,10 @@ public static Tensor tensor_list_concat_lists(Tensor input_a, Tensor input_b, TF var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListConcatLists", name) { args = new object[] { input_a, input_b }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -221,6 +234,10 @@ public static Tensor[] tensor_list_concat_v2(Tensor input_handle, Tensor element var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListConcatV2", name) { args = new object[] { input_handle, element_shape, leading_dims }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -280,6 +297,10 @@ public static Tensor tensor_list_element_shape(Tensor input_handle, TF_DataType var _fast_path_result = tf.Runner.TFE_FastPathExecute(new 
FastPathOpExecInfo(_ctx, "TensorListElementShape", name) { args = new object[] { input_handle }, attrs = new Dictionary() { ["shape_type"] = shape_type } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -339,6 +360,10 @@ public static Tensor tensor_list_from_tensor(Tensor tensor, Tensor element_shape var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListFromTensor", name) { args = new object[] { tensor, element_shape }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -402,6 +427,10 @@ public static Tensor tensor_list_gather(Tensor input_handle, Tensor indices, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListGather", name) { args = new object[] { input_handle, indices, element_shape }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -457,6 +486,10 @@ public static Tensor tensor_list_get_item(Tensor input_handle, Tensor index, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListGetItem", name) { args = new object[] { input_handle, index, element_shape }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -515,6 +548,10 @@ public static Tensor tensor_list_length(Tensor input_handle, string? name = null var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListLength", name) { args = new object[] { input_handle }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -576,6 +613,10 @@ public static Tensor[] tensor_list_pop_back(Tensor input_handle, Tensor element_ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListPopBack", name) { args = new object[] { input_handle, element_shape }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -637,6 +678,10 @@ public static Tensor tensor_list_push_back(Tensor input_handle, Tensor tensor, s var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListPushBack", name) { args = new object[] { input_handle, tensor }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -688,6 +733,10 @@ public static Tensor tensor_list_push_back_batch(Tensor input_handles, Tensor te var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListPushBackBatch", name) { args = new object[] { input_handles, tensor }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -748,6 +797,10 @@ public static Tensor tensor_list_reserve(Tensor element_shape, Tensor num_elemen var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListReserve", name) { args = new object[] { element_shape, num_elements }, attrs = new Dictionary() { ["element_dtype"] = element_dtype } }); return 
_fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -808,6 +861,10 @@ public static Tensor tensor_list_resize(Tensor input_handle, Tensor size, string var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListResize", name) { args = new object[] { input_handle, size }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -872,6 +929,10 @@ public static Tensor tensor_list_scatter(Tensor tensor, Tensor indices, Tensor e var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListScatter", name) { args = new object[] { tensor, indices, element_shape }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -936,6 +997,10 @@ public static Tensor tensor_list_scatter_into_existing_list(Tensor input_handle, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListScatterIntoExistingList", name) { args = new object[] { input_handle, tensor, indices }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1005,6 +1070,10 @@ public static Tensor tensor_list_scatter_v2(Tensor tensor, Tensor indices, Tenso var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListScatterV2", name) { args = new object[] { tensor, indices, element_shape, num_elements }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1059,6 +1128,10 @@ public static Tensor tensor_list_set_item(Tensor input_handle, Tensor index, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListSetItem", name) { args = new object[] { input_handle, index, item }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1123,6 +1196,10 @@ public static Tensor tensor_list_split(Tensor tensor, Tensor element_shape, Tens var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListSplit", name) { args = new object[] { tensor, element_shape, lengths }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1187,6 +1264,10 @@ public static Tensor tensor_list_stack(Tensor input_handle, Tensor element_shape var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListStack", name) { args = new object[] { input_handle, element_shape }, attrs = new Dictionary() { ["element_dtype"] = element_dtype, ["num_elements"] = num_elements } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs index 6eb7a4116..a8152a11e 100644 --- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs @@ -2,6 +2,7 @@ using Tensorflow.Eager; using Tensorflow.Contexts; +using Tensorflow.Exceptions; using static Tensorflow.Binding; namespace Tensorflow; @@ -30,6 +31,10 @@ public static Tensor abs(Tensor x, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Abs", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -96,6 +101,10 @@ public static Tensor accumulate_nv2(Tensors inputs, Shape shape, string? name = var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AccumulateNV2", name) { args = new object[] { inputs }, attrs = new Dictionary() { ["shape"] = shape } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -157,6 +166,10 @@ public static Tensor acos(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Acos", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -217,6 +230,10 @@ public static Tensor acosh(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Acosh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -278,6 +295,10 @@ public static Tensor add(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Add", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -338,6 +359,10 @@ public static Tensor add_n(Tensors inputs, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AddN", name) { args = new object[] { inputs }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -396,6 +421,10 @@ public static Tensor add_v2(Tensor x, Tensor y, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AddV2", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -460,6 +489,10 @@ public static Tensor all(Tensor input, Tensor reduction_indices, bool keep_dims var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "All", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -533,6 +566,10 @@ public static Tensor angle(Tensor input, TF_DataType Tout = TF_DataType.TF_FLOAT var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Angle", name) { args = new object[] { input }, attrs = new Dictionary() { ["Tout"] = Tout } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -597,6 +634,10 @@ public static Tensor any(Tensor input, Tensor reduction_indices, bool keep_dims var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Any", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -650,6 +691,10 @@ public static Tensor approximate_equal(Tensor x, Tensor y, float tolerance = 1E- var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ApproximateEqual", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["tolerance"] = tolerance } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -718,6 +763,10 @@ public static Tensor arg_max(Tensor input, Tensor dimension, TF_DataType output_ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ArgMax", name) { args = new object[] { input, dimension }, attrs = new Dictionary() { ["output_type"] = output_type } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -786,6 +835,10 @@ public static Tensor arg_min(Tensor input, Tensor dimension, TF_DataType output_ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ArgMin", name) { args = new object[] { input, dimension }, attrs = new Dictionary() { ["output_type"] = output_type } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -857,6 +910,10 @@ public static Tensor asin(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Asin", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -918,6 +975,10 @@ public static Tensor asinh(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Asinh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -987,6 +1048,10 @@ public static Tensor atan(Tensor x, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Atan", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1055,6 +1120,10 @@ public static Tensor atan2(Tensor y, Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Atan2", name) { args = new object[] { y, x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1119,6 +1188,10 @@ public static Tensor atanh(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Atanh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1201,6 +1274,10 @@ public static Tensor batch_mat_mul(Tensor x, Tensor y, bool adj_x = false, bool var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatMul", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["adj_x"] = adj_x, ["adj_y"] = adj_y } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1291,6 +1368,10 @@ public static Tensor batch_mat_mul_v2(Tensor x, Tensor y, bool adj_x = false, bo var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatMulV2", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["adj_x"] = adj_x, ["adj_y"] = adj_y } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1386,6 +1467,10 @@ public static Tensor batch_mat_mul_v3(Tensor x, Tensor y, TF_DataType Tout, bool var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatMulV3", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["Tout"] = Tout, ["adj_x"] = adj_x, ["adj_y"] = adj_y } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1458,6 +1543,10 @@ public static Tensor betainc(Tensor a, Tensor b, Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Betainc", name) { args = new object[] { a, b, x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1522,6 +1611,10 @@ public static Tensor bincount(Tensor arr, Tensor size, Tensor weights, string? n var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bincount", name) { args = new object[] { arr, size, weights }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1592,6 +1685,10 @@ public static Tensor bucketize(Tensor input, float[] boundaries, string? 
name = var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bucketize", name) { args = new object[] { input }, attrs = new Dictionary() { ["boundaries"] = boundaries } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1644,6 +1741,10 @@ public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate = false, str var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cast", name) { args = new object[] { x }, attrs = new Dictionary() { ["DstT"] = DstT, ["Truncate"] = Truncate } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1695,6 +1796,10 @@ public static Tensor ceil(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Ceil", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1754,6 +1859,10 @@ public static Tensor clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ClipByValue", name) { args = new object[] { t, clip_value_min, clip_value_max }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1825,6 +1934,10 @@ public static Tensor complex(Tensor real, Tensor imag, TF_DataType Tout = TF_Dat var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Complex", name) { args = new object[] { real, imag }, attrs = new Dictionary() { ["Tout"] = Tout } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1892,6 +2005,10 @@ public static Tensor complex_abs(Tensor x, TF_DataType Tout = TF_DataType.TF_FLO var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ComplexAbs", name) { args = new object[] { x }, attrs = new Dictionary() { ["Tout"] = Tout } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1959,6 +2076,10 @@ public static Tensor conj(Tensor input, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conj", name) { args = new object[] { input }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2021,6 +2142,10 @@ public static Tensor cos(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cos", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2082,6 +2207,10 @@ public static Tensor cosh(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cosh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2139,6 +2268,10 @@ public static Tensor cross(Tensor a, Tensor b, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cross", name) { args = new object[] { a, b }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2232,6 +2365,10 @@ public static Tensor cumprod(Tensor x, Tensor axis, bool exclusive = false, bool var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cumprod", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["exclusive"] = exclusive, ["reverse"] = reverse } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2327,6 +2464,10 @@ public static Tensor cumsum(Tensor x, Tensor axis, bool exclusive = false, bool var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cumsum", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["exclusive"] = exclusive, ["reverse"] = reverse } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2412,6 +2553,10 @@ public static Tensor cumulative_logsumexp(Tensor x, Tensor axis, bool exclusive var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CumulativeLogsumexp", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["exclusive"] = exclusive, ["reverse"] = reverse } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2482,6 +2627,10 @@ public static Tensor dense_bincount(Tensor input, Tensor size, Tensor weights, b var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DenseBincount", name) { args = new object[] { input, size, weights }, attrs = new Dictionary() { ["binary_output"] = binary_output } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2539,6 +2688,10 @@ public static Tensor digamma(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Digamma", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2595,6 +2748,10 @@ public static Tensor div(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Div", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2653,6 +2810,10 @@ public static Tensor div_no_nan(Tensor x, Tensor y, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DivNoNan", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2721,6 +2882,10 @@ public static Tensor equal(Tensor x, Tensor y, bool incompatible_shape_error = t var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Equal", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["incompatible_shape_error"] = incompatible_shape_error } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2772,6 +2937,10 @@ public static Tensor erf(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Erf", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2821,6 +2990,10 @@ public static Tensor erfc(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Erfc", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2870,6 +3043,10 @@ public static Tensor erfinv(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Erfinv", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2933,6 +3110,10 @@ public static Tensor euclidean_norm(Tensor input, Tensor reduction_indices, bool var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EuclideanNorm", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3014,6 +3195,10 @@ public static Tensor exp(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Exp", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3080,6 +3265,10 @@ public static Tensor expm1(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Expm1", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3129,6 +3318,10 @@ public static Tensor floor(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Floor", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3185,6 +3378,10 @@ public static Tensor floor_div(Tensor x, Tensor y, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FloorDiv", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3246,6 +3443,10 @@ public static Tensor floor_mod(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FloorMod", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3315,6 +3516,10 @@ public static Tensor greater(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Greater", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3384,6 +3589,10 @@ public static Tensor greater_equal(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GreaterEqual", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3456,6 +3665,10 @@ public static Tensor histogram_fixed_width(Tensor values, Tensor value_range, Te var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "HistogramFixedWidth", name) { args = new object[] { values, value_range, nbins }, attrs = new Dictionary() { ["dtype"] = dtype } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3526,6 +3739,10 @@ public static Tensor igamma(Tensor a, Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Igamma", name) { args = new object[] { a, x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3577,6 +3794,10 @@ public static Tensor igamma_grad_a(Tensor a, Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IgammaGradA", name) { args = new object[] { a, x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3644,6 +3865,10 @@ public static Tensor igammac(Tensor a, Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Igammac", name) { args = new object[] { a, x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3710,6 +3935,10 @@ public static Tensor imag(Tensor input, TF_DataType Tout = TF_DataType.TF_FLOAT, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Imag", name) { args = new object[] { input }, attrs = new Dictionary() { ["Tout"] = Tout } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3765,6 +3994,10 @@ public static Tensor inv(Tensor x, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Inv", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3821,6 +4054,10 @@ public static Tensor inv_grad(Tensor y, Tensor dy, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InvGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3885,6 +4122,10 @@ public static Tensor is_finite(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsFinite", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3948,6 +4189,10 @@ public static Tensor is_inf(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsInf", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4011,6 +4256,10 @@ public static Tensor is_nan(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsNan", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4079,6 +4328,10 @@ public static Tensor less(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Less", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4148,6 +4401,10 @@ public static Tensor less_equal(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LessEqual", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4211,6 +4468,10 @@ public static Tensor lgamma(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Lgamma", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4275,6 +4536,10 @@ public static Tensor lin_space(Tensor start, Tensor stop, Tensor num, string? na var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LinSpace", name) { args = new object[] { start, stop, num }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4338,6 +4603,10 @@ public static Tensor log(Tensor x, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Log", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4399,6 +4668,10 @@ public static Tensor log1p(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Log1p", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4455,6 +4728,10 @@ public static Tensor logical_and(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogicalAnd", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4505,6 +4782,10 @@ public static Tensor logical_not(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogicalNot", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4561,6 +4842,10 @@ public static Tensor logical_or(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogicalOr", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4633,9 +4918,12 @@ public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatMul", name) { args = new object[] { a, b }, attrs = new Dictionary() { ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b } }); return _fast_path_result[0]; } - catch (Exception ex) + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) { - Console.WriteLine(); } try { @@ -4700,6 +4988,10 @@ public static Tensor max(Tensor input, Tensor reduction_indices, bool keep_dims var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Max", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4758,6 +5050,10 @@ public static Tensor maximum(Tensor x, Tensor y, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Maximum", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4822,6 +5118,10 @@ public static Tensor mean(Tensor input, Tensor reduction_indices, bool keep_dims var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Mean", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4887,6 +5187,10 @@ public static Tensor min(Tensor input, Tensor reduction_indices, bool keep_dims var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Min", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4945,6 +5249,10 @@ public static Tensor minimum(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Minimum", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5005,6 +5313,10 @@ public static Tensor mod(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Mod", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5062,6 +5374,10 @@ public static Tensor mul(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Mul", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5119,6 +5435,10 @@ public static Tensor mul_no_nan(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MulNoNan", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5169,6 +5489,10 @@ public static Tensor ndtri(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Ndtri", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5223,6 +5547,10 @@ public static Tensor neg(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Neg", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5284,6 +5612,10 @@ public static Tensor next_after(Tensor x1, Tensor x2, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "NextAfter", name) { args = new object[] { x1, x2 }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5342,6 +5674,10 @@ public static Tensor not_equal(Tensor x, Tensor y, bool incompatible_shape_error var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "NotEqual", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["incompatible_shape_error"] = incompatible_shape_error } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5405,6 +5741,10 @@ public static Tensor polygamma(Tensor a, Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Polygamma", name) { args = new object[] { a, x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5468,6 +5808,10 @@ public static Tensor pow(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pow", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5532,6 +5876,10 @@ public static Tensor prod(Tensor input, Tensor reduction_indices, bool keep_dims var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Prod", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5616,6 +5964,10 @@ public static Tensor[] quantize_down_and_shrink_range(Tensor input, Tensor input var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeDownAndShrinkRange", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { ["out_type"] = out_type } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5674,6 +6026,10 @@ public static Tensor[] quantized_add(Tensor x, Tensor y, Tensor min_x, Tensor ma var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedAdd", name) { args = new object[] { x, y, min_x, max_x, min_y, max_y }, attrs = new Dictionary() { ["Toutput"] = Toutput } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5759,6 +6115,10 @@ public static Tensor[] quantized_mat_mul(Tensor a, Tensor b, Tensor min_a, Tenso var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMul", name) { args = new object[] { a, b, min_a, max_a, min_b, max_b }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["Tactivation"] = Tactivation } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5823,6 +6183,10 @@ public static Tensor[] quantized_mul(Tensor x, Tensor y, Tensor min_x, Tensor ma var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMul", name) { args = new object[] { x, y, min_x, max_x, min_y, max_y }, attrs = new Dictionary() { 
["Toutput"] = Toutput } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5897,6 +6261,10 @@ public static Tensor ragged_bincount(Tensor splits, Tensor values, Tensor size, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RaggedBincount", name) { args = new object[] { splits, values, size, weights }, attrs = new Dictionary() { ["binary_output"] = binary_output } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5967,6 +6335,10 @@ public static Tensor range(Tensor start, Tensor limit, Tensor delta, string? nam var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Range", name) { args = new object[] { start, limit, delta }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6034,6 +6406,10 @@ public static Tensor real(Tensor input, TF_DataType Tout = TF_DataType.TF_FLOAT, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Real", name) { args = new object[] { input }, attrs = new Dictionary() { ["Tout"] = Tout } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6093,6 +6469,10 @@ public static Tensor real_div(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RealDiv", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6148,6 +6528,10 @@ public static Tensor reciprocal(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reciprocal", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6204,6 +6588,10 @@ public static Tensor reciprocal_grad(Tensor y, Tensor dy, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReciprocalGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6264,6 +6652,10 @@ public static Tensor[] requantization_range(Tensor input, Tensor input_min, Tens var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RequantizationRange", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6323,6 +6715,10 @@ public static Tensor[] requantization_range_per_channel(Tensor input, Tensor inp var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RequantizationRangePerChannel", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { ["clip_value_max"] = clip_value_max } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6395,6 +6791,10 @@ public static Tensor[] requantize(Tensor input, Tensor input_min, Tensor input_m var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Requantize", name) { args = new object[] { input, input_min, input_max, requested_output_min, requested_output_max }, attrs = new Dictionary() { ["out_type"] = out_type } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6458,6 +6858,10 @@ public static Tensor[] requantize_per_channel(Tensor input, Tensor input_min, Te var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RequantizePerChannel", name) { args = new object[] { input, input_min, input_max, requested_output_min, requested_output_max }, attrs = new Dictionary() { ["out_type"] = out_type } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6525,6 +6929,10 @@ public static Tensor rint(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rint", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6580,6 +6988,10 @@ public static Tensor round(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Round", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6634,6 +7046,10 @@ public static Tensor rsqrt(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rsqrt", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6690,6 +7106,10 @@ public static Tensor rsqrt_grad(Tensor y, Tensor dy, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RsqrtGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6772,6 +7192,10 @@ public static Tensor segment_max(Tensor data, Tensor segment_ids, string? name = var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentMax", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6856,6 +7280,10 @@ public static Tensor segment_mean(Tensor data, Tensor segment_ids, string? name var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentMean", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6938,6 +7366,10 @@ public static Tensor segment_min(Tensor data, Tensor segment_ids, string? name = var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentMin", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7020,6 +7452,10 @@ public static Tensor segment_prod(Tensor data, Tensor segment_ids, string? name var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentProd", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7102,6 +7538,10 @@ public static Tensor segment_sum(Tensor data, Tensor segment_ids, string? name = var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentSum", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7196,6 +7636,10 @@ public static Tensor select(Tensor condition, Tensor t, Tensor e, string? name = var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Select", name) { args = new object[] { condition, t, e }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7249,6 +7693,10 @@ public static Tensor select_v2(Tensor condition, Tensor t, Tensor e, string? nam var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SelectV2", name) { args = new object[] { condition, t, e }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7305,6 +7753,10 @@ public static Tensor sigmoid(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sigmoid", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7361,6 +7813,10 @@ public static Tensor sigmoid_grad(Tensor y, Tensor dy, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SigmoidGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7422,6 +7878,10 @@ public static Tensor sign(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sign", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7483,6 +7943,10 @@ public static Tensor sin(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sin", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7544,6 +8008,10 @@ public static Tensor sinh(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sinh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7606,6 +8074,10 @@ public static Tensor sobol_sample(Tensor dim, Tensor num_results, Tensor skip, T var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SobolSample", name) { args = new object[] { dim, num_results, skip }, attrs = new Dictionary() { ["dtype"] = dtype } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7678,6 +8150,10 @@ public static Tensor sparse_bincount(Tensor indices, Tensor values, Tensor dense var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseBincount", name) { args = new object[] { indices, values, dense_shape, size, weights }, attrs = new Dictionary() { ["binary_output"] = binary_output } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7750,6 +8226,10 @@ public static Tensor sparse_mat_mul(Tensor a, Tensor b, bool transpose_a = false var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseMatMul", name) { args = new object[] { a, b }, attrs = new Dictionary() { ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["a_is_sparse"] = a_is_sparse, ["b_is_sparse"] = b_is_sparse } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7814,6 +8294,10 @@ public static Tensor sparse_segment_mean(Tensor data, Tensor indices, Tensor seg var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentMean", name) { args = new object[] { data, indices, segment_ids }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7874,6 +8358,10 @@ public static Tensor sparse_segment_mean_grad(Tensor grad, Tensor indices, Tenso var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentMeanGrad", name) { args = new object[] { grad, indices, segment_ids, output_dim0 }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7939,6 +8427,10 
@@ public static Tensor sparse_segment_mean_with_num_segments(Tensor data, Tensor i var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentMeanWithNumSegments", name) { args = new object[] { data, indices, segment_ids, num_segments }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8001,6 +8493,10 @@ public static Tensor sparse_segment_sqrt_n(Tensor data, Tensor indices, Tensor s var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSqrtN", name) { args = new object[] { data, indices, segment_ids }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8087,6 +8583,10 @@ public static Tensor sparse_segment_sum(Tensor data, Tensor indices, Tensor segm var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSum", name) { args = new object[] { data, indices, segment_ids }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8147,6 +8647,10 @@ public static Tensor sparse_segment_sum_grad(Tensor grad, Tensor indices, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSumGrad", name) { args = new object[] { grad, indices, segment_ids, output_dim0 }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8233,6 +8737,10 @@ public static Tensor sparse_segment_sum_with_num_segments(Tensor data, Tensor in var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSumWithNumSegments", name) { args = new object[] { data, indices, segment_ids, num_segments }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8290,6 +8798,10 @@ public static Tensor sqrt(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sqrt", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8346,6 +8858,10 @@ public static Tensor sqrt_grad(Tensor y, Tensor dy, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SqrtGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8401,6 +8917,10 @@ public static Tensor square(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Square", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8457,6 +8977,10 @@ public static Tensor squared_difference(Tensor x, Tensor y, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SquaredDifference", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8514,6 +9038,10 @@ public static Tensor sub(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sub", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8578,6 +9106,10 @@ public static Tensor sum(Tensor input, Tensor reduction_indices, bool keep_dims var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sum", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8642,6 +9174,10 @@ public static Tensor tan(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Tan", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8705,6 +9241,10 @@ public static Tensor tanh(Tensor x, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Tanh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8761,6 +9301,10 @@ public static Tensor tanh_grad(Tensor y, Tensor dy, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TanhGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8823,6 +9367,10 @@ public static Tensor truncate_div(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TruncateDiv", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8883,6 +9431,10 @@ public static Tensor truncate_mod(Tensor x, Tensor y, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TruncateMod", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8974,6 +9526,10 @@ public static Tensor unsorted_segment_max(Tensor data, Tensor segment_ids, Tenso var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentMax", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9061,6 +9617,10 @@ public static Tensor unsorted_segment_min(Tensor data, Tensor segment_ids, Tenso var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentMin", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9147,6 +9707,10 @@ public static Tensor unsorted_segment_prod(Tensor data, Tensor segment_ids, Tens var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentProd", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9237,6 +9801,10 @@ public static Tensor unsorted_segment_sum(Tensor data, Tensor segment_ids, Tenso var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentSum", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9289,6 +9857,10 @@ public static Tensor xdivy(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Xdivy", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9340,6 +9912,10 @@ public static Tensor xlog1py(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Xlog1py", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9391,6 +9967,10 @@ public static Tensor xlogy(Tensor x, Tensor y, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Xlogy", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -9450,6 +10030,10 @@ public static Tensor zeta(Tensor x, Tensor q, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Zeta", name) { args = new object[] { x, q }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } diff --git a/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs index c0cec2785..59c740c46 100644 --- a/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs @@ -2,6 +2,7 @@ using Tensorflow.Eager; using Tensorflow.Contexts; +using Tensorflow.Exceptions; using static Tensorflow.Binding; namespace Tensorflow; @@ -57,6 +58,10 @@ public static Tensor[] approx_top_k(Tensor input, int k = 0, int reduction_dimen var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ApproxTopK", name) { args = new object[] { input }, attrs = new Dictionary() { ["k"] = k, ["reduction_dimension"] = reduction_dimension, ["recall_target"] = recall_target, ["is_max_k"] = is_max_k, ["reduction_input_size_override"] = reduction_input_size_override, ["aggregate_to_topk"] = aggregate_to_topk } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -142,6 +147,10 @@ public static Tensor avg_pool(Tensor value, int[] ksize, int[] strides, string p var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPool", name) { args = new object[] { value }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -231,6 +240,10 @@ public static Tensor avg_pool3d(Tensor input, int[] ksize, int[] strides, string var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPool3D", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -315,6 +328,10 @@ public static Tensor avg_pool3d_grad(Tensor orig_input_shape, Tensor grad, int[] var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPool3DGrad", name) { args = new object[] { orig_input_shape, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -398,6 +415,10 @@ public static Tensor avg_pool_grad(Tensor orig_input_shape, Tensor grad, int[] k var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPoolGrad", name) { args = new object[] { orig_input_shape, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -476,6 +497,10 @@ public static Tensor batch_norm_with_global_normalization(Tensor t, Tensor m, Te var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchNormWithGlobalNormalization", name) { args = new object[] { t, m, v, beta, gamma }, attrs = new Dictionary() { ["variance_epsilon"] = 
variance_epsilon, ["scale_after_normalization"] = scale_after_normalization } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -551,6 +576,10 @@ public static Tensor[] batch_norm_with_global_normalization_grad(Tensor t, Tenso var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchNormWithGlobalNormalizationGrad", name) { args = new object[] { t, m, v, gamma, backprop }, attrs = new Dictionary() { ["variance_epsilon"] = variance_epsilon, ["scale_after_normalization"] = scale_after_normalization } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -624,6 +653,10 @@ public static Tensor bias_add(Tensor value, Tensor bias, string data_format = "N var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BiasAdd", name) { args = new object[] { value, bias }, attrs = new Dictionary() { ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -697,6 +730,10 @@ public static Tensor bias_add_grad(Tensor out_backprop, string data_format = "NH var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BiasAddGrad", name) { args = new object[] { out_backprop }, attrs = new Dictionary() { ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -760,6 +797,10 @@ public static Tensor bias_add_v1(Tensor value, Tensor bias, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BiasAddV1", name) { args = new object[] { value, bias }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -883,6 +924,10 @@ public static Tensor conv2d(Tensor input, Tensor filter, int[] strides, string p var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv2D", name) { args = new object[] { input, filter }, attrs = new Dictionary() { ["strides"] = strides, ["use_cudnn_on_gpu"] = use_cudnn_on_gpu, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -992,6 +1037,10 @@ public static Tensor conv2d_backprop_filter(Tensor input, Tensor filter_sizes, T var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv2DBackpropFilter", name) { args = new object[] { input, filter_sizes, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["use_cudnn_on_gpu"] = use_cudnn_on_gpu, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1102,6 +1151,10 @@ public static Tensor conv2d_backprop_input(Tensor input_sizes, Tensor filter, Te var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv2DBackpropInput", name) { args = new object[] { input_sizes, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["use_cudnn_on_gpu"] = use_cudnn_on_gpu, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = 
data_format, ["dilations"] = dilations } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1206,6 +1259,10 @@ public static Tensor conv3d(Tensor input, Tensor filter, int[] strides, string p var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3D", name) { args = new object[] { input, filter }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format, ["dilations"] = dilations } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1282,6 +1339,10 @@ public static Tensor conv3d_backprop_filter(Tensor input, Tensor filter, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropFilter", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1371,6 +1432,10 @@ public static Tensor conv3d_backprop_filter_v2(Tensor input, Tensor filter_sizes var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropFilterV2", name) { args = new object[] { input, filter_sizes, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format, ["dilations"] = dilations } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1448,6 +1513,10 @@ public static Tensor conv3d_backprop_input(Tensor input, Tensor filter, Tensor o var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropInput", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1537,6 +1606,10 @@ public static Tensor conv3d_backprop_input_v2(Tensor input_sizes, Tensor filter, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropInputV2", name) { args = new object[] { input_sizes, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format, ["dilations"] = dilations } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1611,6 +1684,10 @@ public static Tensor data_format_dim_map(Tensor x, string src_format = "NHWC", s var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DataFormatDimMap", name) { args = new object[] { x }, attrs = new Dictionary() { ["src_format"] = src_format, ["dst_format"] = dst_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1715,6 +1792,10 @@ public static Tensor data_format_vec_permute(Tensor x, string src_format = "NHWC var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DataFormatVecPermute", name) { args = new object[] { x }, attrs = new Dictionary() { ["src_format"] = src_format, ["dst_format"] = dst_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1835,6 +1916,10 @@ 
public static Tensor depthwise_conv2d_native(Tensor input, Tensor filter, int[] var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthwiseConv2dNative", name) { args = new object[] { input, filter }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -1934,6 +2019,10 @@ public static Tensor depthwise_conv2d_native_backprop_filter(Tensor input, Tenso var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthwiseConv2dNativeBackpropFilter", name) { args = new object[] { input, filter_sizes, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2034,6 +2123,10 @@ public static Tensor depthwise_conv2d_native_backprop_input(Tensor input_sizes, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthwiseConv2dNativeBackpropInput", name) { args = new object[] { input_sizes, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2139,6 +2232,10 @@ public static Tensor dilation2d(Tensor input, Tensor filter, int[] strides, int[ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dilation2D", name) { args = new object[] { input, filter }, attrs = new Dictionary() { ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2211,6 +2308,10 @@ public static Tensor dilation2d_backprop_filter(Tensor input, Tensor filter, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dilation2DBackpropFilter", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2284,6 +2385,10 @@ public static Tensor dilation2d_backprop_input(Tensor input, Tensor filter, Tens var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dilation2DBackpropInput", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2358,6 +2463,10 @@ public static Tensor elu(Tensor features, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Elu", name) { args = new object[] { features }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2408,6 +2517,10 @@ public static Tensor elu_grad(Tensor gradients, Tensor outputs, string? 
name = n var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EluGrad", name) { args = new object[] { gradients, outputs }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2516,6 +2629,10 @@ public static Tensor[] fractional_avg_pool(Tensor value, float[] pooling_ratio, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalAvgPool", name) { args = new object[] { value }, attrs = new Dictionary() { ["pooling_ratio"] = pooling_ratio, ["pseudo_random"] = pseudo_random, ["overlapping"] = overlapping, ["deterministic"] = deterministic, ["seed"] = seed, ["seed2"] = seed2 } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2596,6 +2713,10 @@ public static Tensor fractional_avg_pool_grad(Tensor orig_input_tensor_shape, Te var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalAvgPoolGrad", name) { args = new object[] { orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence }, attrs = new Dictionary() { ["overlapping"] = overlapping } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2731,6 +2852,10 @@ public static Tensor[] fractional_max_pool(Tensor value, float[] pooling_ratio, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalMaxPool", name) { args = new object[] { value }, attrs = new Dictionary() { ["pooling_ratio"] = pooling_ratio, ["pseudo_random"] = pseudo_random, ["overlapping"] = overlapping, ["deterministic"] = deterministic, ["seed"] = seed, ["seed2"] = seed2 } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2803,6 +2928,10 @@ public static Tensor fractional_max_pool_grad(Tensor orig_input, Tensor orig_out var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalMaxPoolGrad", name) { args = new object[] { orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence }, attrs = new Dictionary() { ["overlapping"] = overlapping } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2884,6 +3013,10 @@ public static Tensor[] fused_batch_norm(Tensor x, Tensor scale, Tensor offset, T var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNorm", name) { args = new object[] { x, scale, offset, mean, variance }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["exponential_avg_factor"] = exponential_avg_factor, ["data_format"] = data_format, ["is_training"] = is_training } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -2972,6 +3105,10 @@ public static Tensor[] fused_batch_norm_grad(Tensor y_backprop, Tensor x, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormGrad", name) { args = new object[] { y_backprop, x, scale, reserve_space_1, reserve_space_2 }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["data_format"] = data_format, ["is_training"] = is_training } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3059,6 +3196,10 @@ public static Tensor[] fused_batch_norm_grad_v2(Tensor y_backprop, 
Tensor x, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormGradV2", name) { args = new object[] { y_backprop, x, scale, reserve_space_1, reserve_space_2 }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["data_format"] = data_format, ["is_training"] = is_training } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3147,6 +3288,10 @@ public static Tensor[] fused_batch_norm_grad_v3(Tensor y_backprop, Tensor x, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormGradV3", name) { args = new object[] { y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3 }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["data_format"] = data_format, ["is_training"] = is_training } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3235,6 +3380,10 @@ public static Tensor[] fused_batch_norm_v2(Tensor x, Tensor scale, Tensor offset var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormV2", name) { args = new object[] { x, scale, offset, mean, variance }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["exponential_avg_factor"] = exponential_avg_factor, ["data_format"] = data_format, ["is_training"] = is_training } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3323,6 +3472,10 @@ public static Tensor[] fused_batch_norm_v3(Tensor x, Tensor scale, Tensor offset var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormV3", name) { args = new object[] { x, scale, offset, mean, variance }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["exponential_avg_factor"] = exponential_avg_factor, ["data_format"] = data_format, ["is_training"] = is_training } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3413,6 +3566,10 @@ public static Tensor fused_pad_conv2d(Tensor input, Tensor paddings, Tensor filt var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedPadConv2D", name) { args = new object[] { input, paddings, filter }, attrs = new Dictionary() { ["mode"] = mode, ["strides"] = strides, ["padding"] = padding } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3502,6 +3659,10 @@ public static Tensor fused_resize_and_pad_conv2d(Tensor input, Tensor size, Tens var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedResizeAndPadConv2D", name) { args = new object[] { input, size, paddings, filter }, attrs = new Dictionary() { ["resize_align_corners"] = resize_align_corners, ["mode"] = mode, ["strides"] = strides, ["padding"] = padding } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3582,6 +3743,10 @@ public static Tensor in_top_k(Tensor predictions, Tensor targets, int k = 0, str var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InTopK", name) { args = new object[] { predictions, targets }, attrs = new Dictionary() { ["k"] = k } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3653,6 +3818,10 @@ public static Tensor in_top_kv2(Tensor predictions, Tensor targets, Tensor k, 
st var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InTopKV2", name) { args = new object[] { predictions, targets, k }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3707,6 +3876,10 @@ public static Tensor[] isotonic_regression(Tensor input, TF_DataType output_dtyp var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsotonicRegression", name) { args = new object[] { input }, attrs = new Dictionary() { ["output_dtype"] = output_dtype } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3792,6 +3965,10 @@ public static Tensor lrn(Tensor input, int depth_radius = 5, float bias = 1f, fl var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LRN", name) { args = new object[] { input }, attrs = new Dictionary() { ["depth_radius"] = depth_radius, ["bias"] = bias, ["alpha"] = alpha, ["beta"] = beta } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3846,6 +4023,10 @@ public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string? nam var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LeakyRelu", name) { args = new object[] { features }, attrs = new Dictionary() { ["alpha"] = alpha } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3898,6 +4079,10 @@ public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float al var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LeakyReluGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary() { ["alpha"] = alpha } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -3956,6 +4141,10 @@ public static Tensor log_softmax(Tensor logits, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogSoftmax", name) { args = new object[] { logits }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4035,6 +4224,10 @@ public static Tensor max_pool(Tensor input, int[] ksize, int[] strides, string p var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4119,6 +4312,10 @@ public static Tensor max_pool3d(Tensor input, int[] ksize, int[] strides, string var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool3D", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4204,6 +4401,10 @@ public static Tensor max_pool3d_grad(Tensor orig_input, Tensor orig_output, Tens var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool3DGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4291,6 +4492,10 @@ public static Tensor max_pool3d_grad_grad(Tensor orig_input, Tensor orig_output, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool3DGradGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4382,6 +4587,10 @@ public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4469,6 +4678,10 @@ public static Tensor max_pool_grad_grad(Tensor orig_input, Tensor orig_output, T var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4546,6 +4759,10 @@ public static Tensor max_pool_grad_grad_v2(Tensor orig_input, Tensor orig_output var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradGradV2", name) { args 
= new object[] { orig_input, orig_output, grad, ksize, strides }, attrs = new Dictionary() { ["padding"] = padding, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4628,6 +4845,10 @@ public static Tensor max_pool_grad_grad_with_argmax(Tensor input, Tensor grad, T var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradGradWithArgmax", name) { args = new object[] { input, grad, argmax }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["include_batch_in_index"] = include_batch_in_index } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4701,6 +4922,10 @@ public static Tensor max_pool_grad_v2(Tensor orig_input, Tensor orig_output, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradV2", name) { args = new object[] { orig_input, orig_output, grad, ksize, strides }, attrs = new Dictionary() { ["padding"] = padding, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4783,6 +5008,10 @@ public static Tensor max_pool_grad_with_argmax(Tensor input, Tensor grad, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradWithArgmax", name) { args = new object[] { input, grad, argmax }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["include_batch_in_index"] = include_batch_in_index } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4854,6 +5083,10 @@ public static Tensor max_pool_v2(Tensor input, Tensor ksize, Tensor strides, str var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolV2", name) { args = new object[] { input, ksize, strides }, attrs = new Dictionary() { ["padding"] = padding, ["data_format"] = data_format } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -4946,6 +5179,10 @@ public static Tensor[] max_pool_with_argmax(Tensor input, int[] ksize, int[] str var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolWithArgmax", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["Targmax"] = Targmax, ["padding"] = padding, ["include_batch_in_index"] = include_batch_in_index } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5018,6 +5255,10 @@ public static Tensor nth_element(Tensor input, Tensor n, bool reverse = false, s var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "NthElement", name) { args = new object[] { input, n }, attrs = new Dictionary() { ["reverse"] = reverse } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5088,6 +5329,10 @@ public static Tensor[] quantized_avg_pool(Tensor input, Tensor min_input, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedAvgPool", name) { args = new object[] { input, min_input, max_input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding } }); return _fast_path_result; } + 
catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5174,6 +5419,10 @@ public static Tensor[] quantized_batch_norm_with_global_normalization(Tensor t, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedBatchNormWithGlobalNormalization", name) { args = new object[] { t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max }, attrs = new Dictionary() { ["out_type"] = out_type, ["variance_epsilon"] = variance_epsilon, ["scale_after_normalization"] = scale_after_normalization } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5251,6 +5500,10 @@ public static Tensor[] quantized_bias_add(Tensor input, Tensor bias, Tensor min_ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedBiasAdd", name) { args = new object[] { input, bias, min_input, max_input, min_bias, max_bias }, attrs = new Dictionary() { ["out_type"] = out_type } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5344,6 +5597,10 @@ public static Tensor[] quantized_conv2d(Tensor input, Tensor filter, Tensor min_ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2D", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5420,6 +5677,10 @@ public static Tensor[] quantized_conv2d_and_relu(Tensor input, Tensor filter, Te var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DAndRelu", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5499,6 +5760,10 @@ public static Tensor[] quantized_conv2d_and_relu_and_requantize(Tensor input, Te var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DAndReluAndRequantize", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5580,6 +5845,10 @@ public static Tensor[] quantized_conv2d_and_requantize(Tensor input, Tensor filt var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DAndRequantize", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5662,6 +5931,10 @@ public static 
Tensor[] quantized_conv2d_per_channel(Tensor input, Tensor filter, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DPerChannel", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5739,6 +6012,10 @@ public static Tensor[] quantized_conv2d_with_bias(Tensor input, Tensor filter, T var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBias", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5818,6 +6095,10 @@ public static Tensor[] quantized_conv2d_with_bias_and_relu(Tensor input, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasAndRelu", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5899,6 +6180,10 @@ public static Tensor[] quantized_conv2d_with_bias_and_relu_and_requantize(Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -5982,6 +6267,10 @@ public static Tensor[] quantized_conv2d_with_bias_and_requantize(Tensor input, T var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6068,6 +6357,10 @@ public static Tensor[] quantized_conv2d_with_bias_signed_sum_and_relu_and_requan var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); return _fast_path_result; } + 
catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6153,6 +6446,10 @@ public static Tensor[] quantized_conv2d_with_bias_sum_and_relu(Tensor input, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasSumAndRelu", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, summand }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6238,6 +6535,10 @@ public static Tensor[] quantized_conv2d_with_bias_sum_and_relu_and_requantize(Te var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasSumAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6322,6 +6623,10 @@ public static Tensor[] quantized_depthwise_conv2d(Tensor input, Tensor filter, T var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2D", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6400,6 +6705,10 @@ public static Tensor[] quantized_depthwise_conv2d_with_bias(Tensor input, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2DWithBias", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6484,6 +6793,10 @@ public static Tensor[] quantized_depthwise_conv2d_with_bias_and_relu(Tensor inpu var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2DWithBiasAndRelu", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6571,6 +6884,10 @@ public static Tensor[] quantized_depthwise_conv2d_with_bias_and_relu_and_requant var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, 
["padding_list"] = padding_list } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6660,6 +6977,10 @@ public static Tensor[] quantized_mat_mul_with_bias(Tensor a, Tensor b, Tensor bi var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBias", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6735,6 +7056,10 @@ public static Tensor quantized_mat_mul_with_bias_and_dequantize(Tensor a, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndDequantize", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6828,6 +7153,10 @@ public static Tensor[] quantized_mat_mul_with_bias_and_relu(Tensor a, Tensor b, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndRelu", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6922,6 +7251,10 @@ public static Tensor[] quantized_mat_mul_with_bias_and_relu_and_requantize(Tenso var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndReluAndRequantize", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -6999,6 +7332,10 @@ public static Tensor[] quantized_mat_mul_with_bias_and_requantize(Tensor a, Tens var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndRequantize", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7083,6 +7420,10 @@ public static Tensor[] quantized_max_pool(Tensor input, Tensor min_input, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMaxPool", name) { args = new object[] { input, min_input, max_input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7140,6 +7481,10 @@ 
public static Tensor[] quantized_relu(Tensor features, Tensor min_features, Tens var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedRelu", name) { args = new object[] { features, min_features, max_features }, attrs = new Dictionary() { ["out_type"] = out_type } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7195,6 +7540,10 @@ public static Tensor[] quantized_relu6(Tensor features, Tensor min_features, Ten var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedRelu6", name) { args = new object[] { features, min_features, max_features }, attrs = new Dictionary() { ["out_type"] = out_type } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7251,6 +7600,10 @@ public static Tensor[] quantized_relu_x(Tensor features, Tensor max_value, Tenso var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedReluX", name) { args = new object[] { features, max_value, min_features, max_features }, attrs = new Dictionary() { ["out_type"] = out_type } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7312,6 +7665,10 @@ public static Tensor relu(Tensor features, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Relu", name) { args = new object[] { features }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7361,6 +7718,10 @@ public static Tensor relu6(Tensor features, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Relu6", name) { args = new object[] { features }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7411,6 +7772,10 @@ public static Tensor relu_grad(Tensor gradients, Tensor features, string? name = var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReluGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7472,6 +7837,10 @@ public static Tensor selu(Tensor features, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Selu", name) { args = new object[] { features }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7522,6 +7891,10 @@ public static Tensor selu_grad(Tensor gradients, Tensor outputs, string? name = var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SeluGrad", name) { args = new object[] { gradients, outputs }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7579,6 +7952,10 @@ public static Tensor softmax(Tensor logits, string? 
name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Softmax", name) { args = new object[] { logits }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7634,6 +8011,10 @@ public static Tensor[] softmax_cross_entropy_with_logits(Tensor features, Tensor var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SoftmaxCrossEntropyWithLogits", name) { args = new object[] { features, labels }, attrs = new Dictionary() { } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7684,6 +8065,10 @@ public static Tensor softplus(Tensor features, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Softplus", name) { args = new object[] { features }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7734,6 +8119,10 @@ public static Tensor softplus_grad(Tensor gradients, Tensor features, string? na var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SoftplusGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7784,6 +8173,10 @@ public static Tensor softsign(Tensor features, string? name = null) var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Softsign", name) { args = new object[] { features }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7834,6 +8227,10 @@ public static Tensor softsign_grad(Tensor gradients, Tensor features, string? 
na var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SoftsignGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary() { } }); return _fast_path_result[0]; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7895,6 +8292,10 @@ public static Tensor[] sparse_softmax_cross_entropy_with_logits(Tensor features, var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSoftmaxCrossEntropyWithLogits", name) { args = new object[] { features, labels }, attrs = new Dictionary() { } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -7973,6 +8374,10 @@ public static Tensor[] top_k(Tensor input, int k = 0, bool sorted = true, string var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TopK", name) { args = new object[] { input }, attrs = new Dictionary() { ["k"] = k, ["sorted"] = sorted } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } @@ -8045,6 +8450,10 @@ public static Tensor[] top_kv2(Tensor input, Tensor k, bool sorted = true, strin var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TopKV2", name) { args = new object[] { input, k }, attrs = new Dictionary() { ["sorted"] = sorted } }); return _fast_path_result; } + catch (NotOkStatusException ex) + { + throw ex; + } catch (Exception) { } diff --git a/tools/Tensorflow.CodeGen/GenOpsWriter.cs b/tools/Tensorflow.CodeGen/GenOpsWriter.cs index 7601acdbb..9eefca07e 100644 --- a/tools/Tensorflow.CodeGen/GenOpsWriter.cs +++ b/tools/Tensorflow.CodeGen/GenOpsWriter.cs @@ -39,6 +39,7 @@ public void WriteAll() // Add commonly used namespaces. sb.AppendLine("using Tensorflow.Eager;"); sb.AppendLine("using Tensorflow.Contexts;"); + sb.AppendLine("using Tensorflow.Exceptions;"); sb.AppendLine("using static Tensorflow.Binding;"); sb.AppendLine(); From 675b93a9d752b300313c007069518dc75bf9784a Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Sat, 17 Jun 2023 23:10:37 +0800 Subject: [PATCH 042/182] fix: none gradient error when training LSTM. 
--- src/TensorFlowNET.Core/APIs/tf.tensor.cs | 6 +- src/TensorFlowNET.Core/Common/Types/Nest.cs | 18 +- .../Eager/EagerRunner.TFE_FastPathExecute.cs | 6 +- .../Eager/EagerRunner.TFE_TapeGradient.cs | 8 +- .../Gradients/array_grad.cs | 5 +- .../Keras/ArgsDefinition/Rnn/LSTMArgs.cs | 2 - .../Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs | 2 +- .../Keras/ArgsDefinition/Rnn/RNNArgs.cs | 26 +- .../ArgsDefinition/Rnn/StackedRNNCellsArgs.cs | 3 +- .../Keras/Layers/ILayersApi.cs | 2 +- .../Operations/NnOps/BasicLSTMCell.cs | 2 +- .../Operations/OpDefLibrary.cs | 9 +- .../Operations/_GraphTensorArray.cs | 3 +- .../Operations/array_ops.cs | 33 +- .../Operations/gen_resource_variable_ops.cs | 1573 +++++++++++++++-- .../Operations/image_ops_impl.cs | 6 +- src/TensorFlowNET.Core/Operations/while_v2.cs | 4 +- .../Variables/BaseResourceVariable.cs | 23 +- src/TensorFlowNET.Keras/Engine/Layer.Apply.cs | 2 + src/TensorFlowNET.Keras/Layers/LayersApi.cs | 15 +- src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs | 102 +- .../Layers/Rnn/LSTMCell.cs | 17 +- src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs | 54 +- .../Layers/Rnn/SimpleRNN.cs | 7 +- .../Layers/Rnn/SimpleRNNCell.cs | 5 - .../Layers/Rnn/StackedRNNCells.cs | 12 +- .../Layers/Rnn.Test.cs | 74 +- tools/Tensorflow.CodeGen/OpClassifier.cs | 2 +- tools/Tensorflow.CodeGen/Utils.cs | 17 +- 29 files changed, 1743 insertions(+), 295 deletions(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.tensor.cs b/src/TensorFlowNET.Core/APIs/tf.tensor.cs index be8c2ab24..45aebc0cd 100644 --- a/src/TensorFlowNET.Core/APIs/tf.tensor.cs +++ b/src/TensorFlowNET.Core/APIs/tf.tensor.cs @@ -71,15 +71,15 @@ public Tensor strided_slice(Tensor input, T[] begin, T[] end, T[] strides = n public Tensor[] split(Tensor value, int num_split, Tensor axis, string name = null) => array_ops.split( value: value, - num_split: num_split, + num_or_size_splits: num_split, axis: axis, name: name); public Tensor[] split(Tensor value, int num_split, int axis, string name = null) => array_ops.split( value: value, - num_split: num_split, - axis: axis, + num_or_size_splits: num_split, + axis: ops.convert_to_tensor(axis), name: name); public Tensor ensure_shape(Tensor x, Shape shape, string name = null) diff --git a/src/TensorFlowNET.Core/Common/Types/Nest.cs b/src/TensorFlowNET.Core/Common/Types/Nest.cs index 4de7d1fa5..89ce29f2f 100644 --- a/src/TensorFlowNET.Core/Common/Types/Nest.cs +++ b/src/TensorFlowNET.Core/Common/Types/Nest.cs @@ -197,25 +197,11 @@ public bool IsNested() } else if(NestType is NestType.List) { - foreach(var item in ListValue!) 
- { - if(item.NestType is NestType.List or NestType.Dictionary) - { - return true; - } - } - return false; + return ListValue!.Count > 0; } else { - foreach (var item in DictValue!.Values) - { - if (item.NestType is NestType.List or NestType.Dictionary) - { - return true; - } - } - return false; + return DictValue!.Count > 0; } } diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs index 5f156fd9b..0ce55841b 100644 --- a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs +++ b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs @@ -352,7 +352,11 @@ bool SetOpAttrScalar(Context ctx, SafeEagerOpHandle op, c_api.TFE_OpSetAttrFloat(op, key, Convert.ToSingle(value)); break; case TF_AttrType.TF_ATTR_SHAPE: - var dims = (value as long[]).ToArray(); + long[] dims; + if (value is Shape shape) dims = shape.dims.ToArray(); + else if (value is long[] longs) dims = longs.ToArray(); + else if (value is int[] ints) dims = ints.Select(x => (long)x).ToArray(); + else dims = ((long[])value).ToArray(); c_api.TFE_OpSetAttrShape(op, key, dims, dims.Length, status); status.Check(true); break; diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_TapeGradient.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_TapeGradient.cs index 1f7b3ae64..849dcb3f2 100644 --- a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_TapeGradient.cs +++ b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_TapeGradient.cs @@ -137,7 +137,6 @@ TapeTensor TapeTensorFromTensor(Tensor tensor) { dims[i] = c_api.TFE_TensorHandleDim(handle, i, status); } - Shape tensor_shape = new(dims); if(status.Code != TF_Code.TF_OK) { @@ -145,6 +144,7 @@ TapeTensor TapeTensorFromTensor(Tensor tensor) } else { + Shape tensor_shape = new(dims); return new TapeTensor(id, dtype, tensor_shape); } } @@ -173,8 +173,12 @@ bool DTypeNeedsHandleData(TF_DataType dtype) return dtype == dtypes.variant || dtype == dtypes.resource; } - bool ListContainNone(long[] list) + bool ListContainNone(long[]? list) { + if(list is null) + { + return true; + } int len = list.Length; if(len == 0) { diff --git a/src/TensorFlowNET.Core/Gradients/array_grad.cs b/src/TensorFlowNET.Core/Gradients/array_grad.cs index f939f7b69..1b6bc95ee 100644 --- a/src/TensorFlowNET.Core/Gradients/array_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/array_grad.cs @@ -90,8 +90,7 @@ private static Tensor[] _ConcatGradHelper(Operation op, Tensor grad, int start_v ? 
input_values[0].rank + dim_int : dim_int % input_values[0].rank; var sizes = input_values.Select(x => x.shape[non_neg_concat_dim]).ToArray(); - var sizes_tensor = constant_op.constant(sizes); - out_grads = array_ops.split(grad, sizes_tensor, non_neg_concat_dim).ToList(); + out_grads = array_ops.split(grad, sizes.Select(x => (int)x).ToArray(), ops.convert_to_tensor(non_neg_concat_dim)).ToList(); } else if (constant_op.is_constant(concat_dim)) { @@ -127,7 +126,7 @@ there will be a small number of performance regressions.*/ new Tensor[] { non_neg_concat_dim, tf.constant(0) }, new Tensor[] { tf.constant(1), tf.constant(-1) }); var squeeze_sizes = array_ops.squeeze(slice); - out_grads = array_ops.split(axis: grad, value: squeeze_sizes, num_split: (int)non_neg_concat_dim).ToList(); + out_grads = array_ops.split(axis: grad, value: squeeze_sizes, num_or_size_splits: (int)non_neg_concat_dim).ToList(); } else { diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs index 764641474..db76fda06 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs @@ -4,8 +4,6 @@ public class LSTMArgs : RNNArgs { // TODO: maybe change the `RNNArgs` and implement this class. public bool UnitForgetBias { get; set; } - public float Dropout { get; set; } - public float RecurrentDropout { get; set; } public int Implementation { get; set; } } } diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs index 786236e4d..1b26c05ca 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs @@ -29,7 +29,7 @@ public class LSTMCellArgs : AutoSerializeLayerArgs [JsonProperty("unit_forget_bias")] public bool UnitForgetBias { get; set; } = true; [JsonProperty("implementation")] - public int Implementation { get; set; } = 2; + public int Implementation { get; set; } = 1; } } diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs index 116ff7a2f..2d7fb001a 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs @@ -7,12 +7,6 @@ namespace Tensorflow.Keras.ArgsDefinition.Rnn // TODO(Rinne): add regularizers. public class RNNArgs : AutoSerializeLayerArgs { - [JsonProperty("cell")] - // TODO: the cell should be serialized with `serialize_keras_object`. - public IRnnCell Cell { get; set; } = null; - [JsonProperty("cells")] - public IList Cells { get; set; } = null; - [JsonProperty("return_sequences")] public bool ReturnSequences { get; set; } = false; [JsonProperty("return_state")] @@ -25,8 +19,10 @@ public class RNNArgs : AutoSerializeLayerArgs public bool Unroll { get; set; } = false; [JsonProperty("time_major")] public bool TimeMajor { get; set; } = false; + + public int? InputDim { get; set; } + public int? InputLength { get; set; } // TODO: Add `num_constants` and `zero_output_for_mask`. 
- public Dictionary Kwargs { get; set; } = null; public int Units { get; set; } public Activation Activation { get; set; } @@ -38,21 +34,5 @@ public class RNNArgs : AutoSerializeLayerArgs public float Dropout { get; set; } = .0f; public bool ZeroOutputForMask { get; set; } = false; public float RecurrentDropout { get; set; } = .0f; - - // kernel_regularizer=None, - // recurrent_regularizer=None, - // bias_regularizer=None, - // activity_regularizer=None, - // kernel_constraint=None, - // recurrent_constraint=None, - // bias_constraint=None, - // dropout=0., - // recurrent_dropout=0., - // return_sequences=False, - // return_state=False, - // go_backwards=False, - // stateful=False, - // unroll=False, - // **kwargs): } } diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs index ea6f830b8..50a6127df 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs @@ -5,7 +5,6 @@ namespace Tensorflow.Keras.ArgsDefinition.Rnn { public class StackedRNNCellsArgs : LayerArgs { - public IList Cells { get; set; } - public Dictionary Kwargs { get; set; } = null; + public bool ReverseStateOrder = false; } } diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs index a19508d42..1eb08e77e 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs @@ -182,7 +182,7 @@ public ILayer LSTM(int units, bool unit_forget_bias = true, float dropout = 0f, float recurrent_dropout = 0f, - int implementation = 2, + int implementation = 1, bool return_sequences = false, bool return_state = false, bool go_backwards = false, diff --git a/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs index b2cda952e..16cbd0010 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs @@ -89,7 +89,7 @@ protected Tensors Call(Tensors inputs, Tensor state = null, bool is_training = f gate_inputs = nn_ops.bias_add(gate_inputs, _bias); // i = input_gate, j = new_input, f = forget_gate, o = output_gate - var tensors = array_ops.split(value: gate_inputs, num_split: 4, axis: one); + var tensors = array_ops.split(value: gate_inputs, num_or_size_splits: 4, axis: one); var (i, j, f, o) = (tensors[0], tensors[1], tensors[2], tensors[3]); var forget_bias_tensor = constant_op.constant(_forget_bias, dtype: f.dtype); diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs index 5ff5ccffc..29e1f074f 100644 --- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs +++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs @@ -389,9 +389,13 @@ private AttrValue SetAttrValue(OpDef op_def, AttrDef attr_def, object value) case "list(type)": attr_value.List.Type.AddRange((value as IList).Select(x => _MakeType(x, attr_def))); break; + case "list(float)": + if (value != null) + attr_value.List.F.AddRange((value as IEnumerable).ToArray()); + break; case "list(int)": if (value != null) - attr_value.List.I.AddRange((value as int[]).Select(x => Convert.ToInt64(x))); + attr_value.List.I.AddRange((value as IEnumerable).Select(x => Convert.ToInt64(x))); break; case "bool": attr_value.B = (bool)value; @@ -428,6 +432,9 @@ private AttrValue SetAttrValue(OpDef 
op_def, AttrDef attr_def, object value) case "list(func)": attr_value.List.Func.AddRange(_MakeFuncList(value, attr_def.Name)); break; + case "list(string)": + attr_value.List.S.AddRange((value as IEnumerable).Select(x => ByteString.CopyFromUtf8(x))); + break; default: throw new TypeError($"SetAttrValue: can't not convert attr_def.Type '{attr_def.Type}' to protos."); } diff --git a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs index 4c3fde316..2384e8146 100644 --- a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs +++ b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs @@ -390,7 +390,8 @@ public override Tensor stack(string name = null) int ta_size; if(!_dynamic_size && (_size is not null)) { - ta_size = (int)tensor_util.constant_value(_size); + var size_tensor = tensor_util.constant_value(_size); + ta_size = size_tensor is null ? -1 : (int)size_tensor; } else { diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index c4ec974b8..6b4fea63a 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -1014,38 +1014,27 @@ public static Tensor matrix_transpose(Tensor a, string name = "matrix_transpose" }); } - public static Tensor[] split(Tensor value, Tensor size_splits, int axis, int num = -1, + public static Tensor[] split(Tensor value, int num_or_size_splits, Tensor axis = null, string name = "split") { - if (num == -1) - num = (int)size_splits.shape[0]; - - return gen_array_ops.split_v(value, size_splits, tf.convert_to_tensor(axis), num, name: name); + return gen_array_ops.split(split_dim: axis, value: value, num_split: num_or_size_splits, name); } - public static Tensor[] split(Tensor value, int num_split, T axis, + public static Tensor[] split(Tensor value, int[] num_or_size_splits, Tensor axis = null, int num = -1, string name = "split") { - var size_splits = ops.convert_to_tensor(num_split); - - if (tf.Context.executing_eagerly()) + if(num_or_size_splits.Length == 0) { - return split_eager_fallback(axis, value, num_split: num_split, name: name, ctx: tf.Context); + throw new ValueError("Rank-0 tensors are not supported as the num_or_size_splits argument to split."); } + var size_splits = ops.convert_to_tensor(num_or_size_splits); - var _op = tf.OpDefLib._apply_op_helper("Split", name, new { split_dim = axis, value, num_split }); - return _op.outputs; - } - - private static Tensor[] split_eager_fallback(Ta axis, Tv value, int num_split, string name, Context ctx = null) - { - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new object[] { value }); - var axis_tensor = ops.convert_to_tensor(axis, dtype: TF_DataType.TF_INT32); - var _inputs_flat = new List { axis_tensor }; - _inputs_flat.AddRange(input); - var _attrs = new object[] { "num_split", num_split, "T", _attr_T }; + if(num == -1) + { + num = (int)size_splits.shape[0]; + } - return tf.Runner.Execute(ctx, "Split", num_split, _inputs_flat.ToArray(), _attrs, name: name); + return gen_array_ops.split_v(value: value, size_splits: size_splits, split_dim: axis, num_split: num, name: name); } public static Tensor slice(Tensor input, Tensor[] begin, Tensor[] size, string name = null) diff --git a/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs b/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs index c4e8f8c41..db5f6813c 100644 --- a/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs +++ 
b/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs @@ -1,158 +1,1523 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ +/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/ +using Tensorflow.Eager; +using Tensorflow.Contexts; +using Tensorflow.Exceptions; using static Tensorflow.Binding; -namespace Tensorflow +namespace Tensorflow; + +public static class gen_resource_variable_ops { - public static class gen_resource_variable_ops + /// + /// Adds a value to the current value of a variable. + /// + /// + /// + /// Any ReadVariableOp with a control dependency on this op is guaranteed to + /// see the incremented value or a subsequent newer one. + /// + /// + /// + /// + /// + public static Operation assign_add_variable_op(Tensor resource, Tensor value, string? name = null) { - public static Operation assign_sub_variable_op(Tensor resource, Tensor value, string name = null) + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - if (tf.Context.executing_eagerly()) + try { - tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo( - tf.Context, "AssignSubVariableOp", name, resource, value)); - + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AssignAddVariableOp", name) { args = new object[] { resource, value }, attrs = new Dictionary() { } }); return null; } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return assign_add_variable_op_eager_fallback(resource, value, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["value"] = value; + var _op = tf.OpDefLib._apply_op_helper("AssignAddVariableOp", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype") }; + _execute.record_gradient("AssignAddVariableOp", _op.inputs, _attrs, _result); + } + return _op; + } - return null; + public static Operation assign_add_variable_op_eager_fallback(Tensor resource, Tensor value, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource, value }; + object[] _attrs = new object[] { "dtype", value.dtype }; + var _result = _execute.execute("AssignAddVariableOp", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AssignAddVariableOp", _inputs_flat, _attrs, _result); } + return null; + } + /// + /// Subtracts a value from the current value of a variable. + /// + /// + /// + /// Any ReadVariableOp with a control dependency on this op is guaranteed to + /// see the decremented value or a subsequent newer one. 
+ /// + /// + /// + /// + /// + public static Operation assign_sub_variable_op(Tensor resource, Tensor value, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AssignSubVariableOp", name) { args = new object[] { resource, value }, attrs = new Dictionary() { } }); + return null; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return assign_sub_variable_op_eager_fallback(resource, value, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["value"] = value; + var _op = tf.OpDefLib._apply_op_helper("AssignSubVariableOp", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype") }; + _execute.record_gradient("AssignSubVariableOp", _op.inputs, _attrs, _result); + } + return _op; + } - /// - /// Adds a value to the current value of a variable. - /// - /// - /// - /// - /// - public static Operation assign_add_variable_op(Tensor resource, Tensor value, string name = null) + public static Operation assign_sub_variable_op_eager_fallback(Tensor resource, Tensor value, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource, value }; + object[] _attrs = new object[] { "dtype", value.dtype }; + var _result = _execute.execute("AssignSubVariableOp", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AssignSubVariableOp", _inputs_flat, _attrs, _result); + } + return null; + } + /// + /// Assigns a new value to a variable. + /// + /// + /// + /// Any ReadVariableOp with a control dependency on this op is guaranteed to return + /// this value or a subsequent newer value of the variable. + /// + /// + /// + /// + /// + /// + public static Operation assign_variable_op(Tensor resource, Tensor value, bool validate_shape = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - if (tf.Context.executing_eagerly()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AssignVariableOp", name) { args = new object[] { resource, value }, attrs = new Dictionary() { ["validate_shape"] = validate_shape } }); + return null; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) { - tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "AssignAddVariableOp", name, - resource, value)); + } + try + { + return assign_variable_op_eager_fallback(resource, value, validate_shape: validate_shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["value"] = value; + keywords["validate_shape"] = validate_shape; + var _op = tf.OpDefLib._apply_op_helper("AssignVariableOp", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "validate_shape", _op._get_attr_bool("validate_shape") }; + _execute.record_gradient("AssignVariableOp", _op.inputs, _attrs, _result); + } + return _op; + } + public static Operation assign_variable_op_eager_fallback(Tensor resource, Tensor value, bool validate_shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource, value }; + object[] _attrs = new object[] { "dtype", value.dtype, "validate_shape", validate_shape }; + var _result = _execute.execute("AssignVariableOp", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AssignVariableOp", _inputs_flat, _attrs, _result); + } + return null; + } + /// + /// This op consumes a lock created by `MutexLock`. + /// + /// + /// + /// This op exists to consume a tensor created by `MutexLock` (other than + /// direct control dependencies). It should be the only that consumes the tensor, + /// and will raise an error if it is not. Its only purpose is to keep the + /// mutex lock tensor alive until it is consumed by this op. + /// + /// **NOTE**: This operation must run on the same device as its input. This may + /// be enforced via the `colocate_with` mechanism. + /// + /// + /// + /// + public static Operation consume_mutex_lock(Tensor mutex_lock, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConsumeMutexLock", name) { args = new object[] { mutex_lock }, attrs = new Dictionary() { } }); return null; } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return consume_mutex_lock_eager_fallback(mutex_lock, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["mutex_lock"] = mutex_lock; + var _op = tf.OpDefLib._apply_op_helper("ConsumeMutexLock", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ConsumeMutexLock", _op.inputs, _attrs, _result); + } + return _op; + } - var _op = tf.OpDefLib._apply_op_helper("AssignAddVariableOp", name, new { resource, value }); + public static Operation consume_mutex_lock_eager_fallback(Tensor mutex_lock, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { mutex_lock }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ConsumeMutexLock", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ConsumeMutexLock", _inputs_flat, _attrs, _result); + } + return null; + } + /// + /// Deletes the resource specified by the handle. + /// + /// + /// + /// All subsequent operations using the resource will result in a NotFound + /// error status. + /// + /// + /// + /// + /// + /// whether to ignore the error when the resource + /// doesn't exist. + /// + /// + /// + public static Operation destroy_resource_op(Tensor resource, bool ignore_lookup_error = true, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DestroyResourceOp", name) { args = new object[] { resource }, attrs = new Dictionary() { ["ignore_lookup_error"] = ignore_lookup_error } }); + return null; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return destroy_resource_op_eager_fallback(resource, ignore_lookup_error: ignore_lookup_error, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["ignore_lookup_error"] = ignore_lookup_error; + var _op = tf.OpDefLib._apply_op_helper("DestroyResourceOp", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ignore_lookup_error", _op._get_attr_bool("ignore_lookup_error") }; + _execute.record_gradient("DestroyResourceOp", _op.inputs, _attrs, _result); + } + return _op; + } - return _op; + public static Operation destroy_resource_op_eager_fallback(Tensor resource, bool ignore_lookup_error, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource }; + object[] _attrs = new object[] { "ignore_lookup_error", ignore_lookup_error }; + var _result = _execute.execute("DestroyResourceOp", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DestroyResourceOp", _inputs_flat, _attrs, _result); } + return null; + } + /// + /// Turns off the copy-on-read mode. + /// + /// + /// + /// Turns off the copy-on-read mode of a resource variable. 
If the variable is not in copy-on-read mode, this op has no effect. + /// + /// + /// + /// + public static Operation disable_copy_on_read(Tensor resource, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DisableCopyOnRead", name) { args = new object[] { resource }, attrs = new Dictionary() { } }); + return null; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return disable_copy_on_read_eager_fallback(resource, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + var _op = tf.OpDefLib._apply_op_helper("DisableCopyOnRead", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("DisableCopyOnRead", _op.inputs, _attrs, _result); + } + return _op; + } - public static Operation assign_variable_op(Tensor resource, Tensor value, string name = null) + public static Operation disable_copy_on_read_eager_fallback(Tensor resource, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("DisableCopyOnRead", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - if (tf.Context.executing_eagerly()) + _execute.record_gradient("DisableCopyOnRead", _inputs_flat, _attrs, _result); + } + return null; + } + /// + /// Locks a mutex resource. The output is the lock. So long as the lock tensor + /// + /// + /// + /// is alive, any other request to use `MutexLock` with this mutex will wait. + /// + /// This is particularly useful for creating a critical section when used in + /// conjunction with `MutexLockIdentity`: + /// + /// ```python + /// + /// mutex = mutex_v2( + /// shared_name=handle_name, container=container, name=name) + /// + /// def execute_in_critical_section(fn, *args, **kwargs): + /// lock = gen_resource_variable_ops.mutex_lock(mutex) + /// + /// with ops.control_dependencies([lock]): + /// r = fn(*args, **kwargs) + /// + /// with ops.control_dependencies(nest.flatten(r)): + /// with ops.colocate_with(mutex): + /// ensure_lock_exists = mutex_lock_identity(lock) + /// + /// # Make sure that if any element of r is accessed, all of + /// # them are executed together. + /// r = nest.map_structure(tf.identity, r) + /// + /// with ops.control_dependencies([ensure_lock_exists]): + /// return nest.map_structure(tf.identity, r) + /// ``` + /// + /// While `fn` is running in the critical section, no other functions which wish to + /// use this critical section may run. + /// + /// Often the use case is that two executions of the same graph, in parallel, + /// wish to run `fn`; and we wish to ensure that only one of them executes + /// at a time. This is especially important if `fn` modifies one or more + /// variables at a time. + /// + /// It is also useful if two separate functions must share a resource, but we + /// wish to ensure the usage is exclusive. + /// + /// + /// + /// + public static Tensor mutex_lock(Tensor mutex, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MutexLock", name) { args = new object[] { mutex }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try { - tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "AssignVariableOp", name, - resource, value)); + return mutex_lock_eager_fallback(mutex, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["mutex"] = mutex; + var _op = tf.OpDefLib._apply_op_helper("MutexLock", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("MutexLock", _op.inputs, _attrs, _result); + } + return _result[0]; + } - return null; + public static Tensor mutex_lock_eager_fallback(Tensor mutex, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { mutex }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("MutexLock", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MutexLock", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Creates a Mutex resource that can be locked by `MutexLock`. + /// + /// + /// + /// If non-empty, this variable is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// + /// + /// If non-empty, this variable is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// + public static Tensor mutex_v2(string container = "", string shared_name = "", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MutexV2", name) { args = new object[] { }, attrs = new Dictionary() { ["container"] = container, ["shared_name"] = shared_name } }); + return _fast_path_result[0]; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { } + try + { + return mutex_v2_eager_fallback(container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (container is null) + { + container = ""; + } + if (shared_name is null) + { + shared_name = ""; + } + Dictionary keywords = new(); + keywords["container"] = container; + keywords["shared_name"] = shared_name; + var _op = tf.OpDefLib._apply_op_helper("MutexV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("MutexV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } - var _op = tf.OpDefLib._apply_op_helper("AssignVariableOp", name, new { resource, value }); + public static Tensor mutex_v2_eager_fallback(string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "container", container, "shared_name", shared_name }; + var _result = _execute.execute("MutexV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MutexV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Reads the value of a variable. + /// + /// + /// + /// The tensor returned by this operation is immutable. + /// + /// The value returned by this operation is guaranteed to be influenced by all the + /// writes on which this operation depends directly or indirectly, and to not be + /// influenced by any of the writes which depend directly or indirectly on this + /// operation. + /// + /// + /// + /// + /// + /// the dtype of the value. + /// + /// + /// + public static Tensor read_variable_op(Tensor resource, TF_DataType dtype, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReadVariableOp", name) { args = new object[] { resource }, attrs = new Dictionary() { ["dtype"] = dtype } }); + return _fast_path_result[0]; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return read_variable_op_eager_fallback(resource, dtype: dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["dtype"] = dtype; + var _op = tf.OpDefLib._apply_op_helper("ReadVariableOp", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype") }; + _execute.record_gradient("ReadVariableOp", _op.inputs, _attrs, _result); + } + return _result[0]; + } - return _op; + public static Tensor read_variable_op_eager_fallback(Tensor resource, TF_DataType dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource }; + object[] _attrs = new object[] { "dtype", dtype }; + var _result = _execute.execute("ReadVariableOp", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReadVariableOp", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gather slices from the variable pointed to by `resource` according to `indices`. + /// + /// + /// + /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + /// Produces an output tensor with shape `indices.shape + params.shape[1:]` where: + /// + /// ```python + /// # Scalar indices + /// output[:, ..., :] = params[indices, :, ... :] + /// + /// # Vector indices + /// output[i, :, ..., :] = params[indices[i], :, ... :] + /// + /// # Higher rank indices + /// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor resource_gather(Tensor resource, Tensor indices, TF_DataType dtype, int batch_dims = 0, bool validate_indices = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceGather", name) { args = new object[] { resource, indices }, attrs = new Dictionary() { ["batch_dims"] = batch_dims, ["validate_indices"] = validate_indices, ["dtype"] = dtype } }); + return _fast_path_result[0]; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return resource_gather_eager_fallback(resource, indices, batch_dims: batch_dims, validate_indices: validate_indices, dtype: dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["indices"] = indices; + keywords["batch_dims"] = batch_dims; + keywords["validate_indices"] = validate_indices; + keywords["dtype"] = dtype; + var _op = tf.OpDefLib._apply_op_helper("ResourceGather", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "batch_dims", _op._get_attr_int("batch_dims"), "validate_indices", _op._get_attr_bool("validate_indices"), "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ResourceGather", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor var_is_initialized_op(Tensor resource, string name = null) + public static Tensor resource_gather_eager_fallback(Tensor resource, Tensor indices, int batch_dims, bool validate_indices, TF_DataType dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource, indices }; + object[] _attrs = new object[] { "batch_dims", batch_dims, "validate_indices", validate_indices, "dtype", dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ResourceGather", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - if (tf.Context.executing_eagerly()) + _execute.record_gradient("ResourceGather", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor resource_gather_nd(Tensor resource, Tensor indices, TF_DataType dtype, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try { - var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "VarIsInitializedOp", name, - resource)); + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceGatherNd", name) { args = new object[] { resource, indices }, attrs = new Dictionary() { ["dtype"] = dtype } }); + return _fast_path_result[0]; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return resource_gather_nd_eager_fallback(resource, indices, dtype: dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["indices"] = indices; + keywords["dtype"] = dtype; + var _op = tf.OpDefLib._apply_op_helper("ResourceGatherNd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ResourceGatherNd", _op.inputs, _attrs, _result); + } + return _result[0]; + } - return results[0]; + public static Tensor resource_gather_nd_eager_fallback(Tensor resource, Tensor indices, TF_DataType dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource, indices }; + object[] _attrs = new object[] { "dtype", dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ResourceGatherNd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ResourceGatherNd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Adds sparse updates to the variable referenced by `resource`. + /// + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] += updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] += updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + /// + /// Duplicate entries are handled correctly: if multiple `indices` reference + /// the same location, their contributions add. + /// + /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + /// + ///
+ /// + ///
+ /// + ///
+ /// + /// + /// + /// + public static Operation resource_scatter_add(Tensor resource, Tensor indices, Tensor updates, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterAdd", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary() { } }); + return null; + } + catch (NotOkStatusException ex) + { + throw ex; } + catch (Exception) + { + } + try + { + return resource_scatter_add_eager_fallback(resource, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("ResourceScatterAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ResourceScatterAdd", _op.inputs, _attrs, _result); + } + return _op; + } - var _op = tf.OpDefLib._apply_op_helper("VarIsInitializedOp", name, new { resource }); + public static Operation resource_scatter_add_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates }; + object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ResourceScatterAdd", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ResourceScatterAdd", _inputs_flat, _attrs, _result); + } + return null; + } + /// + /// Divides sparse updates into the variable referenced by `resource`. + /// + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] /= updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] /= updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + /// + /// Duplicate entries are handled correctly: if multiple `indices` reference + /// the same location, their contributions multiply. + /// + /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + /// + ///
+ /// + ///
+ /// + ///
+ /// + /// + /// + /// + public static Operation resource_scatter_div(Tensor resource, Tensor indices, Tensor updates, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterDiv", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary() { } }); + return null; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return resource_scatter_div_eager_fallback(resource, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("ResourceScatterDiv", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ResourceScatterDiv", _op.inputs, _attrs, _result); + } + return _op; + } - return _op.output; + public static Operation resource_scatter_div_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates }; + object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ResourceScatterDiv", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ResourceScatterDiv", _inputs_flat, _attrs, _result); } + return null; + } + /// + /// Reduces sparse updates into the variable referenced by `resource` using the `max` operation. + /// + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] = max(ref[indices, ...], updates[...]) + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + /// + /// Duplicate entries are handled correctly: if multiple `indices` reference + /// the same location, their contributions are combined. + /// + /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + /// + ///
+ /// + ///
+ /// + ///
+ /// + /// + /// + /// + public static Operation resource_scatter_max(Tensor resource, Tensor indices, Tensor updates, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterMax", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary() { } }); + return null; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return resource_scatter_max_eager_fallback(resource, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("ResourceScatterMax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ResourceScatterMax", _op.inputs, _attrs, _result); + } + return _op; + } - /// - /// Creates a handle to a Variable resource. - /// - /// - /// - /// - /// - /// - /// - public static Tensor var_handle_op(TF_DataType dtype, Shape shape, - string container = "", string shared_name = "", string name = null) + public static Operation resource_scatter_max_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates }; + object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ResourceScatterMax", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - if (tf.Context.executing_eagerly()) + _execute.record_gradient("ResourceScatterMax", _inputs_flat, _attrs, _result); + } + return null; + } + /// + /// Reduces sparse updates into the variable referenced by `resource` using the `min` operation. + /// + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] = min(ref[indices, ...], updates[...]) + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + /// + /// Duplicate entries are handled correctly: if multiple `indices` reference + /// the same location, their contributions are combined. + /// + /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + /// + ///
+ /// + ///
+ /// + ///
+ /// + /// + /// + /// + public static Operation resource_scatter_min(Tensor resource, Tensor indices, Tensor updates, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterMin", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary() { } }); + return null; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return resource_scatter_min_eager_fallback(resource, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) { - var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "VarHandleOp", name) - { - attrs = ConvertToDict(new - { - dtype, - shape = shape.dims, - container, - shared_name, - allowed_devices = new string[0] - }) - }); + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("ResourceScatterMin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ResourceScatterMin", _op.inputs, _attrs, _result); + } + return _op; + } - return results[0]; + public static Operation resource_scatter_min_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates }; + object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ResourceScatterMin", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ResourceScatterMin", _inputs_flat, _attrs, _result); + } + return null; + } + /// + /// Multiplies sparse updates into the variable referenced by `resource`. + /// + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] *= updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] *= updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + /// + /// Duplicate entries are handled correctly: if multiple `indices` reference + /// the same location, their contributions multiply. + /// + /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + /// + ///
+ /// + ///
+ /// + ///
+ /// + /// + /// + /// + public static Operation resource_scatter_mul(Tensor resource, Tensor indices, Tensor updates, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterMul", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary() { } }); + return null; } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return resource_scatter_mul_eager_fallback(resource, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("ResourceScatterMul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ResourceScatterMul", _op.inputs, _attrs, _result); + } + return _op; + } - var _op = tf.OpDefLib._apply_op_helper("VarHandleOp", name, new + public static Operation resource_scatter_mul_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates }; + object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ResourceScatterMul", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ResourceScatterMul", _inputs_flat, _attrs, _result); + } + return null; + } + /// + /// Subtracts sparse updates from the variable referenced by `resource`. + /// + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] -= updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] -= updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + /// + /// Duplicate entries are handled correctly: if multiple `indices` reference + /// the same location, their contributions add. + /// + /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + /// + ///
+ /// + ///
+ /// + ///
+ /// + /// + /// + /// + public static Operation resource_scatter_sub(Tensor resource, Tensor indices, Tensor updates, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterSub", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary() { } }); + return null; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return resource_scatter_sub_eager_fallback(resource, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) { - dtype, - shape, - container, - shared_name - }); + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("ResourceScatterSub", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ResourceScatterSub", _op.inputs, _attrs, _result); + } + return _op; + } - return _op.output; + public static Operation resource_scatter_sub_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates }; + object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ResourceScatterSub", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ResourceScatterSub", _inputs_flat, _attrs, _result); } + return null; + } + /// + /// Assigns sparse updates to the variable referenced by `resource`. + /// + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] = updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] = updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + /// + /// + /// + /// + /// + /// + public static Operation resource_scatter_update(Tensor resource, Tensor indices, Tensor updates, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterUpdate", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary() { } }); + return null; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return resource_scatter_update_eager_fallback(resource, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("ResourceScatterUpdate", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ResourceScatterUpdate", _op.inputs, _attrs, _result); + } + return _op; + } - public static Tensor destroy_resource_op(Tensor resource, bool ignore_lookup_error = true, string name = null) - => tf.Context.ExecuteOp("DestroyResourceOp", name, - new ExecuteOpArgs(resource).SetAttributes(new { ignore_lookup_error })); + public static Operation resource_scatter_update_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates }; + object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ResourceScatterUpdate", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ResourceScatterUpdate", _inputs_flat, _attrs, _result); + } + return null; + } + /// + /// Creates a handle to a Variable resource. + /// + /// + /// + /// the container this variable is placed in. + /// + /// + /// + /// + /// the name by which this variable is referred to. + /// + /// + /// + /// + /// the type of this variable. Must agree with the dtypes + /// of all ops using this variable. + /// + /// + /// + /// + /// The (possibly partially specified) shape of this variable. + /// + /// + /// + /// + /// DEPRECATED. The allowed devices containing the resource variable. Set when the + /// output ResourceHandle represents a per-replica/partitioned resource variable. + /// + /// + /// + public static Tensor var_handle_op(TF_DataType dtype, Shape shape, string container = "", string shared_name = "", string[] allowed_devices = null, string? 
name = null) + { + var _ctx = tf.Context; + if (allowed_devices is null) + { + allowed_devices = new string[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "VarHandleOp", name) { args = new object[] { }, attrs = new Dictionary() { ["container"] = container, ["shared_name"] = shared_name, ["dtype"] = dtype, ["shape"] = shape, ["allowed_devices"] = allowed_devices } }); + return _fast_path_result[0]; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return var_handle_op_eager_fallback(container: container, shared_name: shared_name, dtype: dtype, shape: shape, allowed_devices: allowed_devices, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (container is null) + { + container = ""; + } + if (shared_name is null) + { + shared_name = ""; + } + Dictionary keywords = new(); + keywords["container"] = container; + keywords["shared_name"] = shared_name; + keywords["dtype"] = dtype; + keywords["shape"] = shape; + keywords["allowed_devices"] = allowed_devices; + var _op = tf.OpDefLib._apply_op_helper("VarHandleOp", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name"), "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape"), "allowed_devices", _op.get_attr("allowed_devices") }; + _execute.record_gradient("VarHandleOp", _op.inputs, _attrs, _result); + } + return _result[0]; + } - /// - /// Reads the value of a variable. - /// - /// - /// - /// - /// - public static Tensor read_variable_op(Tensor resource, TF_DataType dtype, string name = null) - => tf.Context.ExecuteOp("ReadVariableOp", name, new ExecuteOpArgs(resource) - .SetAttributes(new { dtype })); + public static Tensor var_handle_op_eager_fallback(string container, string shared_name, TF_DataType dtype, Shape shape, string[] allowed_devices, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "container", container, "shared_name", shared_name, "dtype", dtype, "shape", shape, "allowed_devices", allowed_devices }; + var _result = _execute.execute("VarHandleOp", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("VarHandleOp", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Checks whether a resource handle-based variable has been initialized. + /// + /// + /// + public static Tensor var_is_initialized_op(Tensor resource, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "VarIsInitializedOp", name) { args = new object[] { resource }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return var_is_initialized_op_eager_fallback(resource, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["resource"] = resource; + var _op = tf.OpDefLib._apply_op_helper("VarIsInitializedOp", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("VarIsInitializedOp", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor resource_gather(Tensor resource, Tensor indices, TF_DataType dtype, - int batch_dims = 0, bool validate_indices = true, string name = null) + public static Tensor var_is_initialized_op_eager_fallback(Tensor resource, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { resource }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("VarIsInitializedOp", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("ResourceGather", name, new + _execute.record_gradient("VarIsInitializedOp", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the shape of the variable pointed to by `resource`. + /// + /// + /// + /// This operation returns a 1-D integer tensor representing the shape of `input`. + /// + /// For example: + /// + /// ``` + /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + /// shape(t) ==> [2, 2, 3] + /// ``` + /// + /// + /// + /// + /// + public static Tensor variable_shape(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "VariableShape", name) { args = new object[] { input }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result[0]; + } + catch (NotOkStatusException ex) { - resource, - indices, - dtype, - batch_dims, - validate_indices - }); + throw ex; + } + catch (Exception) + { + } + try + { + return variable_shape_eager_fallback(input, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("VariableShape", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("VariableShape", _op.inputs, _attrs, _result); + } + return _result[0]; + } - return _op.output; + public static Tensor variable_shape_eager_fallback(Tensor input, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "out_type", out_type }; + var _result = _execute.execute("VariableShape", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("VariableShape", _inputs_flat, _attrs, _result); } + return _result[0]; } } diff --git a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs index 9d52f5161..126df9e42 100644 --- a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs +++ b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs @@ -1778,10 +1778,10 @@ internal static Tensor _bbox_overlap(Tensor boxes_a, Tensor boxes_b) { // a_y_min: [0], a_x_min: [1], a_y_max: [2], a_x_max[3] var a_xy_minmax = array_ops.split( - value: boxes_a, num_split: 4, axis: 2); + value: boxes_a, num_or_size_splits: 4, axis: ops.convert_to_tensor(2)); // b_y_min: [0], b_x_min: [1], b_y_max: [2], b_x_max[3] var b_xy_minmax = array_ops.split( - value: boxes_b, num_split: 4, axis: 2); + value: boxes_b, num_or_size_splits: 4, axis: ops.convert_to_tensor(2)); var i_xmin = math_ops.maximum( a_xy_minmax[1], array_ops.transpose(b_xy_minmax[1], new[] { 0, 2, 1 })); @@ -1943,7 +1943,7 @@ public static (Tensor, Tensor) non_max_suppression_padded_v2(Tensor boxes, Tenso using (ops.name_scope("canonicalize_coordinates")) { // y_1 = [0], x_1 = [1], y_2 = [2], x_2 = [3] - var yx = array_ops.split(value: boxes, num_split: 4, axis: 2); + var yx = array_ops.split(value: boxes, num_or_size_splits: 4, axis: ops.convert_to_tensor(2)); var y_1_is_min = math_ops.reduce_all( gen_math_ops.less_equal(yx[0][0, 0, 0], yx[2][0, 0, 0])); var y_minmax = control_flow_ops.cond( diff --git a/src/TensorFlowNET.Core/Operations/while_v2.cs b/src/TensorFlowNET.Core/Operations/while_v2.cs index 7ee3e9e8d..3f324f872 100644 --- a/src/TensorFlowNET.Core/Operations/while_v2.cs +++ b/src/TensorFlowNET.Core/Operations/while_v2.cs @@ -86,7 +86,7 @@ object[] wrapped_cond(object[] inputs) } } - var cond_graph = FuncGraph.func_graph_from_func("cond", wrapped_cond, null, + var cond_graph = FuncGraph.func_graph_from_func(cond_name, wrapped_cond, null, null, signature: func_graph_signature, add_control_dependencies: add_control_dependencies); bool stateful_parallelism = false; @@ -111,7 +111,7 @@ object[] 
wrapped_body(object[] inputs) return new object[] { loop_counter + 1, maximum_iterations_arg }.Concat(outputs).ToArray(); } - var body_graph = FuncGraph.func_graph_from_func("body", wrapped_body, null, null, func_graph_signature, + var body_graph = FuncGraph.func_graph_from_func(body_name, wrapped_body, null, null, func_graph_signature, add_control_dependencies: add_control_dependencies, acd_record_initial_resource_uses: stateful_parallelism); // TODO(Rinne): possible wrong implementation here. diff --git a/src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs b/src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs index b9a7022a2..a54283bd4 100644 --- a/src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs +++ b/src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs @@ -170,11 +170,28 @@ public IVariableV1 assign_lazy_load(Tensor value, string name = null) public Tensor value() => GraphElement ?? _read_variable_op(); - protected Tensor _read_variable_op() + protected Tensor _read_variable_op(bool no_copy = false) { variable_accessed(this); - var result = gen_resource_variable_ops.read_variable_op(handle, _dtype); - resource_variable_ops._maybe_set_handle_data(_dtype, handle, result); + + Tensor read_and_set_handle(bool no_copy) + { + if (no_copy) + { + gen_resource_variable_ops.disable_copy_on_read(handle); + } + var result = gen_resource_variable_ops.read_variable_op(handle, _dtype); + resource_variable_ops._maybe_set_handle_data(_dtype, handle, result); + return result; + } + + // TODO(Rinne): deal with caching device. + var result = read_and_set_handle(no_copy); + if (!tf.Context.executing_eagerly()) + { + tf.Runner.TFE_TapeSetRecordOperation("ReadVariableOp", new Tensor[] { result }, new Tensor[] { handle }, + backward_function: (x, _) => x); + } // have to set shape when converting to substituent placeholder if (result.shape.ndim == -1) diff --git a/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs b/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs index a0358f074..d52190fd3 100644 --- a/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs +++ b/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs @@ -38,6 +38,8 @@ public virtual Tensors Apply(Tensors inputs, Tensors states = null, bool trainin _handle_activity_regularization(inputs, outputs); _set_mask_metadata(inputs, outputs, null); + // TODO(Rinne): set save spec if null + scope.__exit__(); return outputs; diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index 66c3cdc1a..efca93009 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -709,10 +709,7 @@ public IRnnCell SimpleRNNCell( public IRnnCell StackedRNNCells( IEnumerable cells) - => new StackedRNNCells(new StackedRNNCellsArgs - { - Cells = cells.ToList() - }); + => new StackedRNNCells(cells.ToList(), new StackedRNNCellsArgs()); /// /// @@ -757,9 +754,8 @@ public ILayer RNN( bool stateful = false, bool unroll = false, bool time_major = false) - => new RNN(new RNNArgs + => new RNN(cell, new RNNArgs { - Cell = cell, ReturnSequences = return_sequences, ReturnState = return_state, GoBackwards = go_backwards, @@ -776,9 +772,8 @@ public ILayer RNN( bool stateful = false, bool unroll = false, bool time_major = false) - => new RNN(new RNNArgs + => new RNN(cell, new RNNArgs { - Cells = cell.ToList(), ReturnSequences = return_sequences, ReturnState = return_state, GoBackwards = go_backwards, @@ -798,7 +793,7 @@ public IRnnCell LSTMCell(int uints, bool unit_forget_bias = true, float 
dropout = 0f, float recurrent_dropout = 0f, - int implementation = 2) + int implementation = 1) => new LSTMCell(new LSTMCellArgs { Units = uints, @@ -851,7 +846,7 @@ public ILayer LSTM(int units, bool unit_forget_bias = true, float dropout = 0f, float recurrent_dropout = 0f, - int implementation = 2, + int implementation = 1, bool return_sequences = false, bool return_state = false, bool go_backwards = false, diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs b/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs index 1449c908e..025465fd6 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs @@ -2,6 +2,7 @@ using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; using Tensorflow.Common.Types; +using Tensorflow.Common.Extensions; namespace Tensorflow.Keras.Layers.Rnn { @@ -14,22 +15,105 @@ namespace Tensorflow.Keras.Layers.Rnn public class LSTM : RNN { LSTMArgs args; - InputSpec[] state_spec; - - int units => args.Units; + InputSpec[] _state_spec; + InputSpec _input_spec; + bool _could_use_gpu_kernel; public LSTM(LSTMArgs args) : - base(args) + base(CreateCell(args), args) { this.args = args; - state_spec = new[] { units, units } - .Select(dim => new InputSpec(shape: (-1, dim))) - .ToArray(); + _input_spec = new InputSpec(ndim: 3); + _state_spec = new[] { args.Units, args.Units }.Select(dim => new InputSpec(shape: (-1, dim))).ToArray(); + _could_use_gpu_kernel = args.Activation == keras.activations.Tanh + && args.RecurrentActivation == keras.activations.Sigmoid + && args.RecurrentDropout == 0 && !args.Unroll && args.UseBias + && ops.executing_eagerly_outside_functions(); + } + + private static IRnnCell CreateCell(LSTMArgs lstmArgs) + { + return new LSTMCell(new LSTMCellArgs() + { + Units = lstmArgs.Units, + Activation = lstmArgs.Activation, + RecurrentActivation = lstmArgs.RecurrentActivation, + UseBias = lstmArgs.UseBias, + KernelInitializer = lstmArgs.KernelInitializer, + RecurrentInitializer = lstmArgs.RecurrentInitializer, + UnitForgetBias = lstmArgs.UnitForgetBias, + BiasInitializer = lstmArgs.BiasInitializer, + // TODO(Rinne): kernel_regularizer + // TODO(Rinne): recurrent_regularizer + // TODO(Rinne): bias_regularizer + // TODO(Rinne): kernel_constriant + // TODO(Rinne): recurrent_constriant + // TODO(Rinne): bias_constriant + Dropout = lstmArgs.Dropout, + RecurrentDropout = lstmArgs.RecurrentDropout, + Implementation = lstmArgs.Implementation, + DType = lstmArgs.DType, + Trainable = lstmArgs.Trainable + }); } - protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) + protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bool? training = null, IOptionalArgs? optional_args = null) { - return base.Call(inputs, initial_state: state, training: training); + // skip the condition of ragged input + + (inputs, initial_state, _) = _process_inputs(inputs, initial_state, null); + + Tensor mask = null; + if(optional_args is RnnOptionalArgs rnnArgs) + { + mask = rnnArgs.Mask; + } + + var single_input = inputs.Single; + var input_shape = single_input.shape; + var timesteps = args.TimeMajor ? input_shape[0] : input_shape[1]; + + _maybe_reset_cell_dropout_mask(Cell); + + Func step = (inputs, states) => + { + var res = Cell.Apply(inputs, states, training is null ? 
true : training.Value); + var (output, state) = res; + return (output, state); + }; + + var (last_output, outputs, states) = keras.backend.rnn( + step, + inputs, + initial_state, + constants: null, + go_backwards: args.GoBackwards, + mask: mask, + unroll: args.Unroll, + input_length: ops.convert_to_tensor(timesteps), + time_major: args.TimeMajor, + zero_output_for_mask: args.ZeroOutputForMask, + return_all_outputs: args.ReturnSequences + ); + + Tensor output; + if (args.ReturnSequences) + { + output = keras.backend.maybe_convert_to_ragged(false, outputs, (int)timesteps, args.GoBackwards); + } + else + { + output = last_output; + } + + if (args.ReturnState) + { + return new Tensor[] { output }.Concat(states).ToArray().ToTensors(); + } + else + { + return output; + } } } } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs index 17042767d..bb71a914c 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs @@ -1,5 +1,6 @@ using Serilog.Core; using System.Diagnostics; +using Tensorflow.Common.Extensions; using Tensorflow.Common.Types; using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; @@ -81,7 +82,7 @@ Tensor bias_initializer() _bias_initializer = _args.BiasInitializer; } _bias = add_weight("bias", (_args.Units * 4), - initializer: _args.BiasInitializer); + initializer: _bias_initializer); } built = true; } @@ -94,7 +95,6 @@ protected override Tensors Call(Tensors inputs, Tensors states = null, bool? tra var rec_dp_mask = get_recurrent_dropout_mask_for_cell( h_tm1, training.Value, count: 4); - Tensor c; Tensor o; if (_args.Implementation == 1) @@ -123,7 +123,7 @@ protected override Tensors Call(Tensors inputs, Tensors states = null, bool? tra var x_f = math_ops.matmul(inputs_f, k_f); var x_c = math_ops.matmul(inputs_c, k_c); var x_o = math_ops.matmul(inputs_o, k_o); - if(_args.UseBias) + if (_args.UseBias) { var b = tf.split(_bias.AsTensor(), num_split: 4, axis: 0); Tensor b_i = b[0], b_f = b[1], b_c = b[2], b_o = b[3]; @@ -170,7 +170,7 @@ protected override Tensors Call(Tensors inputs, Tensors states = null, bool? 
tra } var h = o * _args.Activation.Apply(c); // 这里是因为 Tensors 类初始化的时候会把第一个元素之后的元素打包成一个数组 - return new Tensors(h, h, c); + return new Nest(new INestStructure[] { new NestNode(h), new NestList(h, c) }).ToTensors(); } /// @@ -188,22 +188,21 @@ public Tensors _compute_carry_and_output(Tensor[] x, Tensor[] h_tm1, Tensor c_tm h_tm1_o = h_tm1[3]; var _recurrent_kernel_tensor = _recurrent_kernel.AsTensor(); - var startIndex = _recurrent_kernel_tensor.shape[0]; - var endIndex = _recurrent_kernel_tensor.shape[1]; + int startIndex = (int)_recurrent_kernel_tensor.shape[0]; var _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor, new[] { 0, 0 }, new[] { startIndex, _args.Units }); var i = _args.RecurrentActivation.Apply( x_i + math_ops.matmul(h_tm1_i, _recurrent_kernel_slice)); _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor, - new[] { 0, _args.Units }, new[] { startIndex, _args.Units * 2}); + new[] { 0, _args.Units }, new[] { startIndex, _args.Units}); var f = _args.RecurrentActivation.Apply( x_f + math_ops.matmul(h_tm1_f, _recurrent_kernel_slice)); _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor, - new[] { 0, _args.Units * 2 }, new[] { startIndex, _args.Units * 3 }); + new[] { 0, _args.Units * 2 }, new[] { startIndex, _args.Units }); var c = f * c_tm1 + i * _args.Activation.Apply( x_c + math_ops.matmul(h_tm1_c, _recurrent_kernel_slice)); _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor, - new[] { 0, _args.Units * 3 }, new[] { startIndex, endIndex }); + new[] { 0, _args.Units * 3 }, new[] { startIndex, _args.Units }); var o = _args.RecurrentActivation.Apply( x_o + math_ops.matmul(h_tm1_o, _recurrent_kernel_slice)); diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs index 0aeacc25d..f86de8a85 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs @@ -45,23 +45,25 @@ protected IRnnCell Cell } } - public RNN(RNNArgs args) : base(PreConstruct(args)) + public RNN(IRnnCell cell, RNNArgs args) : base(PreConstruct(args)) { _args = args; SupportsMasking = true; - // if is StackedRnncell - if (args.Cells != null) - { - Cell = new StackedRNNCells(new StackedRNNCellsArgs - { - Cells = args.Cells - }); - } - else - { - Cell = args.Cell; - } + Cell = cell; + + // get input_shape + _args = PreConstruct(args); + + _num_constants = 0; + } + + public RNN(IEnumerable cells, RNNArgs args) : base(PreConstruct(args)) + { + _args = args; + SupportsMasking = true; + + Cell = new StackedRNNCells(cells, new StackedRNNCellsArgs()); // get input_shape _args = PreConstruct(args); @@ -330,7 +332,7 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo states = new Tensors(states.SkipLast(_num_constants).ToArray()); states = len(states) == 1 && is_tf_rnn_cell ? 
new Tensors(states[0]) : states; var (output, new_states) = Cell.Apply(inputs, states, optional_args: new RnnOptionalArgs() { Constants = constants }); - return (output, new_states.Single); + return (output, new_states); }; } else @@ -382,6 +384,11 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo } else { + //var tapeSet = tf.GetTapeSet(); + //foreach(var tape in tapeSet) + //{ + // tape.Watch(output); + //} return output; } } @@ -405,7 +412,7 @@ public override Tensors Apply(Tensors inputs, Tensors initial_states = null, boo throw new NotImplementedException(); } - private (Tensors inputs, Tensors initial_state, Tensors constants) _process_inputs(Tensors inputs, Tensors initial_state, Tensors constants) + protected (Tensors inputs, Tensors initial_state, Tensors constants) _process_inputs(Tensors inputs, Tensors initial_state, Tensors constants) { if (inputs.Length > 1) { @@ -484,7 +491,7 @@ private void _validate_args_if_ragged(bool is_ragged_input, Tensors mask) } - void _maybe_reset_cell_dropout_mask(ILayer cell) + protected void _maybe_reset_cell_dropout_mask(ILayer cell) { if (cell is DropoutRNNCellMixin CellDRCMixin) { @@ -495,26 +502,21 @@ void _maybe_reset_cell_dropout_mask(ILayer cell) private static RNNArgs PreConstruct(RNNArgs args) { - if (args.Kwargs == null) - { - args.Kwargs = new Dictionary(); - } - // If true, the output for masked timestep will be zeros, whereas in the // false case, output from previous timestep is returned for masked timestep. - var zeroOutputForMask = (bool)args.Kwargs.Get("zero_output_for_mask", false); + var zeroOutputForMask = args.ZeroOutputForMask; Shape input_shape; - var propIS = (Shape)args.Kwargs.Get("input_shape", null); - var propID = (int?)args.Kwargs.Get("input_dim", null); - var propIL = (int?)args.Kwargs.Get("input_length", null); + var propIS = args.InputShape; + var propID = args.InputDim; + var propIL = args.InputLength; if (propIS == null && (propID != null || propIL != null)) { input_shape = new Shape( propIL ?? -1, propID ?? -1); - args.Kwargs["input_shape"] = input_shape; + args.InputShape = input_shape; } return args; diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs index 551c20cdd..a22f31c7d 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs @@ -10,14 +10,14 @@ namespace Tensorflow.Keras.Layers.Rnn public class SimpleRNN : RNN { SimpleRNNArgs args; - public SimpleRNN(SimpleRNNArgs args) : base(CreateCellForArgs(args)) + public SimpleRNN(SimpleRNNArgs args) : base(CreateCellForArgs(args), args) { this.args = args; } - private static SimpleRNNArgs CreateCellForArgs(SimpleRNNArgs args) + private static SimpleRNNCell CreateCellForArgs(SimpleRNNArgs args) { - args.Cell = new SimpleRNNCell(new SimpleRNNCellArgs() + return new SimpleRNNCell(new SimpleRNNCellArgs() { Units = args.Units, Activation = args.Activation, @@ -30,7 +30,6 @@ private static SimpleRNNArgs CreateCellForArgs(SimpleRNNArgs args) DType = args.DType, Trainable = args.Trainable, }); - return args; } } } \ No newline at end of file diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs index 8fdc598ed..c77f77790 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs @@ -115,10 +115,5 @@ protected override Tensors Call(Tensors inputs, Tensors states = null, bool? 
tra return new Tensors(output, output); } } - - public Tensors get_initial_state(Tensors inputs = null, Tensor batch_size = null, TF_DataType dtype = TF_DataType.DtInvalid) - { - return RnnUtils.generate_zero_filled_state_for_cell(this, inputs, batch_size, dtype); - } } } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs index 3e7b227c2..8799bfb23 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs @@ -15,15 +15,11 @@ public class StackedRNNCells : Layer, IRnnCell public IList Cells { get; set; } public bool _reverse_state_order; - public StackedRNNCells(StackedRNNCellsArgs args) : base(args) + public StackedRNNCells(IEnumerable cells, StackedRNNCellsArgs args) : base(args) { - if (args.Kwargs == null) - { - args.Kwargs = new Dictionary(); - } - Cells = args.Cells; - - _reverse_state_order = (bool)args.Kwargs.Get("reverse_state_order", false); + Cells = cells.ToList(); + + _reverse_state_order = args.ReverseStateOrder; if (_reverse_state_order) { diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs index 54ea1565b..ed9b6ae95 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs @@ -55,30 +55,56 @@ public void LSTMCell() Assert.AreEqual((2, 4), new_states[0].shape); } + [TestMethod] + public void TrainLSTMWithMnist() + { + var input = keras.Input((784)); + var x = keras.layers.Reshape((28, 28)).Apply(input); + //x = keras.layers.LSTM(50, return_sequences: true).Apply(x); + //x = keras.layers.LSTM(100, return_sequences: true).Apply(x); + //x = keras.layers.LSTM(150, return_sequences: true).Apply(x); + x = keras.layers.LSTM(4, implementation: 2).Apply(x); + //x = keras.layers.Dense(100).Apply(x); + var output = keras.layers.Dense(10, activation: "softmax").Apply(x); + + var model = keras.Model(input, output); + model.summary(); + model.compile(keras.optimizers.Adam(), keras.losses.SparseCategoricalCrossentropy(), new string[] { "accuracy" }); + + var data_loader = new MnistModelLoader(); + var dataset = data_loader.LoadAsync(new ModelLoadSetting + { + TrainDir = "mnist", + OneHot = false, + ValidationSize = 58000, + }).Result; + + model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size: 16, epochs: 30); + } + [TestMethod] public void SimpleRNN() { - //var inputs = np.arange(6 * 10 * 8).reshape((6, 10, 8)).astype(np.float32); - ///*var simple_rnn = keras.layers.SimpleRNN(4); - //var output = simple_rnn.Apply(inputs); - //Assert.AreEqual((32, 4), output.shape);*/ - - //var simple_rnn = tf.keras.layers.SimpleRNN(4, return_sequences: true, return_state: true); - //var (whole_sequence_output, final_state) = simple_rnn.Apply(inputs); - //Assert.AreEqual((6, 10, 4), whole_sequence_output.shape); - //Assert.AreEqual((6, 4), final_state.shape); + var input = keras.Input((784)); + var x = keras.layers.Reshape((28, 28)).Apply(input); + x = keras.layers.SimpleRNN(10).Apply(x); + var output = keras.layers.Dense(10, activation: "softmax").Apply(x); - var inputs = keras.Input(shape: (10, 8)); - var x = keras.layers.SimpleRNN(4).Apply(inputs); - var output = keras.layers.Dense(10).Apply(x); - var model = keras.Model(inputs, output); + var model = keras.Model(input, output); model.summary(); + model.compile(keras.optimizers.Adam(), keras.losses.CategoricalCrossentropy(), new string[] { "accuracy" }); - 
model.compile(keras.optimizers.Adam(), keras.losses.SparseCategoricalCrossentropy()); - var datax = np.ones((16, 10, 8), dtype: dtypes.float32); - var datay = np.ones((16)); - model.fit(datax, datay, epochs: 20); + var data_loader = new MnistModelLoader(); + var dataset = data_loader.LoadAsync(new ModelLoadSetting + { + TrainDir = "mnist", + OneHot = false, + ValidationSize = 58000, + }).Result; + + model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size: 16, epochs: 10); } + [TestMethod] public void RNNForSimpleRNNCell() { @@ -109,19 +135,5 @@ public void RNNForLSTMCell() Console.WriteLine($"output: {output}"); Assert.AreEqual((5, 4), output.shape); } - - [TestMethod] - public void MyTest() - { - var a = tf.zeros((2, 3)); - var b = tf.ones_like(a); - var c = tf.ones((3,4)); - - var d = new Tensors { a, b, c }; - var (A, BC) = d; - Console.WriteLine($"A:{A}"); - Console.WriteLine($"BC:{BC}"); - } - } } diff --git a/tools/Tensorflow.CodeGen/OpClassifier.cs b/tools/Tensorflow.CodeGen/OpClassifier.cs index eaad3fec8..2d22c5d22 100644 --- a/tools/Tensorflow.CodeGen/OpClassifier.cs +++ b/tools/Tensorflow.CodeGen/OpClassifier.cs @@ -9,7 +9,7 @@ namespace Tensorflow.CodeGen { public class OpClassifier { - private static readonly string _filenamePattern = @"^gen_[a-z]*_ops.py$"; + private static readonly string _filenamePattern = @"^gen_[a-z_]*_ops.py$"; private static readonly string _pythonFunctionPattern = @"def\s+(\w+\d*\w*)\((?:\s*\w+\s*(?:=\s*[\S]*)*,\s*)*\s*name=None\):"; private Dictionary> _opSet = new(); public Dictionary> OpSet => _opSet; diff --git a/tools/Tensorflow.CodeGen/Utils.cs b/tools/Tensorflow.CodeGen/Utils.cs index 19de6c0e0..6c69b7f95 100644 --- a/tools/Tensorflow.CodeGen/Utils.cs +++ b/tools/Tensorflow.CodeGen/Utils.cs @@ -178,10 +178,25 @@ public static OpList ReadAllOpDefs(string path) else if (attr.Type == "list(shape)") { res.Add((attr.Name, "Shape[]", "NOVALUE")); + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.List) + { + List exps = new(); + foreach (var value in attr.DefaultValue.List.Shape) + { + exps.Add($"new Shape({string.Join(", ", value.Dim.Select(x => x.Size))})"); + } + string expression = "new Shape[]{" + $"{string.Join(", ", exps)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "string[]", $"null")); + } + else + { + res.Add((attr.Name, "string[]", "NOVALUE")); + } } else if (attr.Type == "list(string)") { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.List) { List values = new(); foreach (var value in attr.DefaultValue.List.S) From a0df8109f83c343b3fb92e70871e95e495974262 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Sun, 18 Jun 2023 03:45:11 +0800 Subject: [PATCH 043/182] fix: training LSTM does not align with tensorflow. 
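For context, this patch switches the default value of the LSTM/LSTMCell "implementation" argument back to 2 (matching upstream Keras) and forwards unit_forget_bias from the layer to the cell. A minimal sketch of what that means at a call site (the layer API and parameter name come from this repository; the variable names are illustrative only):

    // Defaults now follow upstream Keras (implementation: 2).
    var lstm_default = keras.layers.LSTM(100);
    // The previous default can still be requested explicitly if needed.
    var lstm_legacy = keras.layers.LSTM(100, implementation: 1);
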
--- src/TensorFlowNET.Core/Binding.Util.cs | 2 +- .../Eager/EagerRunner.TFE_TapeGradient.cs | 2 +- .../Eager/EagerTensor.ToString.cs | 7 +++++- .../Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs | 2 +- .../Keras/Layers/ILayersApi.cs | 2 +- src/TensorFlowNET.Core/NumPy/NDArrayRender.cs | 18 +++++++-------- .../Initializers/NpyLoadInitializer.cs | 22 +++++++++++++++++++ .../Tensorflow.Binding.csproj | 2 +- src/TensorFlowNET.Core/Training/Trackable.cs | 3 +-- src/TensorFlowNET.Keras/Layers/LayersApi.cs | 7 +++--- .../Layers/Rnn/LSTMCell.cs | 11 ++++++---- .../Layers/Rnn.Test.cs | 17 ++++++-------- tools/Tensorflow.CodeGen/FunctionGenerator.cs | 8 +++++-- .../Tensorflow.CodeGen.csproj | 2 +- 14 files changed, 68 insertions(+), 37 deletions(-) create mode 100644 src/TensorFlowNET.Core/Operations/Initializers/NpyLoadInitializer.cs diff --git a/src/TensorFlowNET.Core/Binding.Util.cs b/src/TensorFlowNET.Core/Binding.Util.cs index 8df39334a..c5705930e 100644 --- a/src/TensorFlowNET.Core/Binding.Util.cs +++ b/src/TensorFlowNET.Core/Binding.Util.cs @@ -503,7 +503,7 @@ public static TF_DataType GetDataType(this object data) case Tensors tensors: return tensors.dtype; case IEnumerable tensors: - return tensors.First().dtype; + return tensors.Where(x => x is not null).First().dtype; case RefVariable variable: return variable.dtype; case ResourceVariable variable: diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_TapeGradient.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_TapeGradient.cs index 849dcb3f2..3515fed83 100644 --- a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_TapeGradient.cs +++ b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_TapeGradient.cs @@ -65,7 +65,7 @@ public Tensor[] TFE_TapeGradient(ITape tape, { outgrad_vec = output_gradients.ToList(); } - var result = tape.ComputeGradient(target_vec, sources_vec, source_tensors_that_are_targets, outgrad_vec, false); + var result = tape.ComputeGradient(target_vec, sources_vec, source_tensors_that_are_targets, outgrad_vec, true); bool unconnected_gradients_zero = unconnected_gradients == "zero"; diff --git a/src/TensorFlowNET.Core/Eager/EagerTensor.ToString.cs b/src/TensorFlowNET.Core/Eager/EagerTensor.ToString.cs index ce3c983b5..71b3075aa 100644 --- a/src/TensorFlowNET.Core/Eager/EagerTensor.ToString.cs +++ b/src/TensorFlowNET.Core/Eager/EagerTensor.ToString.cs @@ -10,6 +10,11 @@ public override string ToString() var str = NDArrayRender.ToString(nd); return $"tf.Tensor: shape={shape}, dtype={dtype.as_numpy_name()}, numpy={str}"; } - + public string ToString(int maxLength) + { + var nd = new NDArray(this); + var str = NDArrayRender.ToString(nd, maxLength); + return $"tf.Tensor: shape={shape}, dtype={dtype.as_numpy_name()}, numpy={str}"; + } } } diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs index 1b26c05ca..786236e4d 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs @@ -29,7 +29,7 @@ public class LSTMCellArgs : AutoSerializeLayerArgs [JsonProperty("unit_forget_bias")] public bool UnitForgetBias { get; set; } = true; [JsonProperty("implementation")] - public int Implementation { get; set; } = 1; + public int Implementation { get; set; } = 2; } } diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs index 1eb08e77e..a19508d42 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs +++ 
b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs @@ -182,7 +182,7 @@ public ILayer LSTM(int units, bool unit_forget_bias = true, float dropout = 0f, float recurrent_dropout = 0f, - int implementation = 1, + int implementation = 2, bool return_sequences = false, bool return_state = false, bool go_backwards = false, diff --git a/src/TensorFlowNET.Core/NumPy/NDArrayRender.cs b/src/TensorFlowNET.Core/NumPy/NDArrayRender.cs index 02cb5926c..230797b8b 100644 --- a/src/TensorFlowNET.Core/NumPy/NDArrayRender.cs +++ b/src/TensorFlowNET.Core/NumPy/NDArrayRender.cs @@ -7,7 +7,7 @@ namespace Tensorflow.NumPy { public class NDArrayRender { - public static string ToString(NDArray array) + public static string ToString(NDArray array, int maxLength = 10) { Shape shape = array.shape; if (shape.IsScalar) @@ -15,12 +15,12 @@ public static string ToString(NDArray array) var s = new StringBuilder(); s.Append("array("); - Build(s, array); + Build(s, array, maxLength); s.Append(")"); return s.ToString(); } - static void Build(StringBuilder s, NDArray array) + static void Build(StringBuilder s, NDArray array, int maxLength) { var shape = array.shape; @@ -35,11 +35,11 @@ static void Build(StringBuilder s, NDArray array) var len = shape[0]; s.Append("["); - if (len <= 10) + if (len <= maxLength) { for (int i = 0; i < len; i++) { - Build(s, array[i]); + Build(s, array[i], maxLength); if (i < len - 1) { s.Append(", "); @@ -49,9 +49,9 @@ static void Build(StringBuilder s, NDArray array) } else { - for (int i = 0; i < 5; i++) + for (int i = 0; i < maxLength / 2; i++) { - Build(s, array[i]); + Build(s, array[i], maxLength); if (i < len - 1) { s.Append(", "); @@ -62,9 +62,9 @@ static void Build(StringBuilder s, NDArray array) s.Append(" ... "); s.AppendLine(); - for (int i = (int)len - 5; i < len; i++) + for (int i = (int)len - maxLength / 2; i < len; i++) { - Build(s, array[i]); + Build(s, array[i], maxLength); if (i < len - 1) { s.Append(", "); diff --git a/src/TensorFlowNET.Core/Operations/Initializers/NpyLoadInitializer.cs b/src/TensorFlowNET.Core/Operations/Initializers/NpyLoadInitializer.cs new file mode 100644 index 000000000..202af652a --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/Initializers/NpyLoadInitializer.cs @@ -0,0 +1,22 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.NumPy; + +namespace Tensorflow.Operations.Initializers +{ + /// + /// An initializer specially used for debugging (to load weights from disk). + /// + class NpyLoadInitializer : IInitializer + { + string _path; + public NpyLoadInitializer(string path) { _path = path; } + public string ClassName => ""; + public IDictionary Config => new Dictionary(); + public Tensor Apply(InitializerArgs args) + { + return np.load(_path); + } + } +} diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj index b08b2e2b7..02578ec18 100644 --- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj +++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj @@ -111,7 +111,7 @@ https://tensorflownet.readthedocs.io - + diff --git a/src/TensorFlowNET.Core/Training/Trackable.cs b/src/TensorFlowNET.Core/Training/Trackable.cs index 2b5bf2a72..3eff34875 100644 --- a/src/TensorFlowNET.Core/Training/Trackable.cs +++ b/src/TensorFlowNET.Core/Training/Trackable.cs @@ -179,8 +179,7 @@ protected virtual IVariableV1 _add_variable_with_custom_getter(VariableArgs args // handles slot variables. 
if (!args.Overwrite || new_variable is RefVariable || new_variable is Trackable) { - var temp = new_variable as Trackable; - var res = _track_trackable(temp, args.Name, args.Overwrite); + var res = _track_trackable(new_variable as Trackable, args.Name, args.Overwrite); Debug.Assert(res is IVariableV1); return res as IVariableV1; } diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index efca93009..0bdcbc841 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -793,7 +793,7 @@ public IRnnCell LSTMCell(int uints, bool unit_forget_bias = true, float dropout = 0f, float recurrent_dropout = 0f, - int implementation = 1) + int implementation = 2) => new LSTMCell(new LSTMCellArgs { Units = uints, @@ -846,7 +846,7 @@ public ILayer LSTM(int units, bool unit_forget_bias = true, float dropout = 0f, float recurrent_dropout = 0f, - int implementation = 1, + int implementation = 2, bool return_sequences = false, bool return_state = false, bool go_backwards = false, @@ -869,7 +869,8 @@ public ILayer LSTM(int units, GoBackwards = go_backwards, Stateful = stateful, TimeMajor = time_major, - Unroll = unroll + Unroll = unroll, + UnitForgetBias = unit_forget_bias }); /// diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs index bb71a914c..284a2b778 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs @@ -1,4 +1,5 @@ -using Serilog.Core; +using Newtonsoft.Json; +using Serilog.Core; using System.Diagnostics; using Tensorflow.Common.Extensions; using Tensorflow.Common.Types; @@ -54,6 +55,7 @@ public LSTMCell(LSTMCellArgs args) public override void build(KerasShapesWrapper input_shape) { + base.build(input_shape); var single_shape = input_shape.ToSingleShape(); var input_dim = single_shape[-1]; _kernel = add_weight("kernel", (input_dim, _args.Units * 4), @@ -82,7 +84,8 @@ Tensor bias_initializer() _bias_initializer = _args.BiasInitializer; } _bias = add_weight("bias", (_args.Units * 4), - initializer: _bias_initializer); + initializer: _bias_initializer + ); } built = true; } @@ -203,7 +206,7 @@ public Tensors _compute_carry_and_output(Tensor[] x, Tensor[] h_tm1, Tensor c_tm x_c + math_ops.matmul(h_tm1_c, _recurrent_kernel_slice)); _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor, new[] { 0, _args.Units * 3 }, new[] { startIndex, _args.Units }); - var o = _args.RecurrentActivation.Apply( + var o = _args.Activation.Apply( x_o + math_ops.matmul(h_tm1_o, _recurrent_kernel_slice)); return new Tensors(c, o); @@ -220,7 +223,7 @@ public Tensors _compute_carry_and_output_fused(Tensor[] z, Tensor c_tm1) Tensor z0 = z[0], z1 = z[1], z2 = z[2], z3 = z[3]; var i = _args.RecurrentActivation.Apply(z0); var f = _args.RecurrentActivation.Apply(z1); - var c = f * c_tm1 + i * _args.RecurrentActivation.Apply(z2); + var c = f * c_tm1 + i * _args.Activation.Apply(z2); var o = _args.RecurrentActivation.Apply(z3); return new Tensors(c, o); } diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs index ed9b6ae95..8eeee7a88 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs @@ -60,26 +60,23 @@ public void TrainLSTMWithMnist() { var input = keras.Input((784)); var x = keras.layers.Reshape((28, 28)).Apply(input); - //x = keras.layers.LSTM(50, return_sequences: 
true).Apply(x); - //x = keras.layers.LSTM(100, return_sequences: true).Apply(x); - //x = keras.layers.LSTM(150, return_sequences: true).Apply(x); - x = keras.layers.LSTM(4, implementation: 2).Apply(x); - //x = keras.layers.Dense(100).Apply(x); + x = keras.layers.LSTM(50, return_sequences: true).Apply(x); + x = keras.layers.LSTM(100).Apply(x); var output = keras.layers.Dense(10, activation: "softmax").Apply(x); var model = keras.Model(input, output); model.summary(); - model.compile(keras.optimizers.Adam(), keras.losses.SparseCategoricalCrossentropy(), new string[] { "accuracy" }); + model.compile(keras.optimizers.Adam(), keras.losses.CategoricalCrossentropy(), new string[] { "accuracy" }); var data_loader = new MnistModelLoader(); var dataset = data_loader.LoadAsync(new ModelLoadSetting { TrainDir = "mnist", - OneHot = false, - ValidationSize = 58000, + OneHot = true, + ValidationSize = 55000, }).Result; - model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size: 16, epochs: 30); + model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size: 16, epochs: 1); } [TestMethod] @@ -102,7 +99,7 @@ public void SimpleRNN() ValidationSize = 58000, }).Result; - model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size: 16, epochs: 10); + model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size: 16, epochs: 2); } [TestMethod] diff --git a/tools/Tensorflow.CodeGen/FunctionGenerator.cs b/tools/Tensorflow.CodeGen/FunctionGenerator.cs index bb07dddf5..f3687d6b4 100644 --- a/tools/Tensorflow.CodeGen/FunctionGenerator.cs +++ b/tools/Tensorflow.CodeGen/FunctionGenerator.cs @@ -83,8 +83,12 @@ public void AppendFunction(OpDef op, StringBuilder sb) sb.AppendLine("}"); // try - sb.Append("catch(NotOkStatusException ex)\n{\n"); - sb.AppendLine("throw ex;"); + sb.Append("catch(NotOkStatusException ex1)\n{\n"); + sb.AppendLine("throw ex1;"); + sb.AppendLine("}"); // catch + + sb.Append("catch(InvalidArgumentError ex2)\n{\n"); + sb.AppendLine("throw ex2;"); sb.AppendLine("}"); // catch sb.Append("catch(Exception)\n{\n"); diff --git a/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj index 4cb3368d0..03195e6ac 100644 --- a/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj +++ b/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj @@ -9,7 +9,7 @@ - + From 02cb239c5ffb5a109297aaec047ffed35fc05269 Mon Sep 17 00:00:00 2001 From: Luc BOLOGNA Date: Sun, 4 Jun 2023 21:58:40 +0200 Subject: [PATCH 044/182] Refactor: Change Model evaluate IModel.Dictionary evaluate(NDArray, NDArray, ...) is now IModel.Dictionary evaluate(Tensor, Tensor, ...) Merge Model.Evaluate.test_step_multi_inputs_function(...) and Model.Evaluate.test_function(...) 
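A minimal usage sketch of the new Tensor-based overload (illustrative only: the model, the sample data and the "loss" key below are assumptions, not part of this patch):

    Tensor x_val = tf.constant(np.ones((32, 784), dtype: dtypes.float32));
    Tensor y_val = tf.constant(np.ones((32, 10), dtype: dtypes.float32));
    var results = model.evaluate(x_val, y_val, batch_size: 8, verbose: 1);
    // results is a Dictionary<string, float> keyed by metric name, e.g. results["loss"].
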
Note: An internal function need to add an explicit cast in Tensor --- src/TensorFlowNET.Core/Keras/Engine/IModel.cs | 2 +- src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs | 16 +++++----------- src/TensorFlowNET.Keras/Engine/Model.Fit.cs | 2 +- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/src/TensorFlowNET.Core/Keras/Engine/IModel.cs b/src/TensorFlowNET.Core/Keras/Engine/IModel.cs index 19f3df9ba..ddc72aeec 100644 --- a/src/TensorFlowNET.Core/Keras/Engine/IModel.cs +++ b/src/TensorFlowNET.Core/Keras/Engine/IModel.cs @@ -60,7 +60,7 @@ void load_weights(string filepath, bool skip_mismatch = false, object options = null); - Dictionary evaluate(NDArray x, NDArray y, + Dictionary evaluate(Tensor x, Tensor y, int batch_size = -1, int verbose = 1, int steps = -1, diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index 185de4f48..a71f7f395 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -27,7 +27,7 @@ public partial class Model /// /// /// - public Dictionary evaluate(NDArray x, NDArray y, + public Dictionary evaluate(Tensor x, Tensor y, int batch_size = -1, int verbose = 1, int steps = -1, @@ -91,7 +91,7 @@ public Dictionary evaluate(NDArray x, NDArray y, return results; } - public Dictionary evaluate(IEnumerable x, NDArray y, int verbose = 1, bool is_val = false) + public Dictionary evaluate(IEnumerable x, Tensor y, int verbose = 1, bool is_val = false) { var data_handler = new DataHandler(new DataHandlerArgs { @@ -119,7 +119,7 @@ public Dictionary evaluate(IEnumerable x, NDArray y, int foreach (var step in data_handler.steps()) { callbacks.on_test_batch_begin(step); - logs = test_step_multi_inputs_function(data_handler, iterator); + logs = test_function(data_handler, iterator); var end_step = step + data_handler.StepIncrement; if (is_val == false) callbacks.on_test_batch_end(end_step, logs); @@ -178,20 +178,14 @@ public Dictionary evaluate(IDatasetV2 x, int verbose = 1, bool is } Dictionary test_function(DataHandler data_handler, OwnedIterator iterator) - { - var data = iterator.next(); - var outputs = test_step(data_handler, data[0], data[1]); - tf_with(ops.control_dependencies(new object[0]), ctl => _test_counter.assign_add(1)); - return outputs; - } - Dictionary test_step_multi_inputs_function(DataHandler data_handler, OwnedIterator iterator) { var data = iterator.next(); var x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount; var outputs = train_step(data_handler, new Tensors(data.Take(x_size)), new Tensors(data.Skip(x_size))); - tf_with(ops.control_dependencies(new object[0]), ctl => _train_counter.assign_add(1)); + tf_with(ops.control_dependencies(new object[0]), ctl => _test_counter.assign_add(1)); return outputs; } + Dictionary test_step(DataHandler data_handler, Tensor x, Tensor y) { (x, y) = data_handler.DataAdapter.Expand1d(x, y); diff --git a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs index bb8e18ccf..17ecde984 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs @@ -266,7 +266,7 @@ History FitInternal(DataHandler data_handler, int epochs, int verbose, List Date: Mon, 5 Jun 2023 00:01:53 +0200 Subject: [PATCH 045/182] Refactor: Model.Evaluate.cs --- .../Engine/Model.Evaluate.cs | 129 +++++------------- 1 file changed, 36 insertions(+), 93 deletions(-) diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs 
b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index a71f7f395..85c262a9c 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -14,6 +14,38 @@ namespace Tensorflow.Keras.Engine { public partial class Model { + protected Dictionary evaluate(CallbackList callbacks, DataHandler data_handler, bool is_val) + { + callbacks.on_test_begin(); + + //Dictionary? logs = null; + var logs = new Dictionary(); + int x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount; + foreach (var (epoch, iterator) in data_handler.enumerate_epochs()) + { + reset_metrics(); + callbacks.on_epoch_begin(epoch); + // data_handler.catch_stop_iteration(); + + foreach (var step in data_handler.steps()) + { + callbacks.on_test_batch_begin(step); + + var data = iterator.next(); + + logs = train_step(data_handler, new Tensors(data.Take(x_size)), new Tensors(data.Skip(x_size))); + tf_with(ops.control_dependencies(Array.Empty()), ctl => _test_counter.assign_add(1)); + + var end_step = step + data_handler.StepIncrement; + + if (!is_val) + callbacks.on_test_batch_end(end_step, logs); + } + } + + return logs; + } + /// /// Returns the loss value & metrics values for the model in test mode. /// @@ -64,31 +96,8 @@ public Dictionary evaluate(Tensor x, Tensor y, Verbose = verbose, Steps = data_handler.Inferredsteps }); - callbacks.on_test_begin(); - - //Dictionary? logs = null; - var logs = new Dictionary(); - foreach (var (epoch, iterator) in data_handler.enumerate_epochs()) - { - reset_metrics(); - // data_handler.catch_stop_iteration(); - foreach (var step in data_handler.steps()) - { - callbacks.on_test_batch_begin(step); - logs = test_function(data_handler, iterator); - var end_step = step + data_handler.StepIncrement; - if (is_val == false) - callbacks.on_test_batch_end(end_step, logs); - } - } - - var results = new Dictionary(); - foreach (var log in logs) - { - results[log.Key] = log.Value; - } - return results; + return evaluate(callbacks, data_handler, is_val); } public Dictionary evaluate(IEnumerable x, Tensor y, int verbose = 1, bool is_val = false) @@ -107,31 +116,8 @@ public Dictionary evaluate(IEnumerable x, Tensor y, int v Verbose = verbose, Steps = data_handler.Inferredsteps }); - callbacks.on_test_begin(); - Dictionary logs = null; - foreach (var (epoch, iterator) in data_handler.enumerate_epochs()) - { - reset_metrics(); - callbacks.on_epoch_begin(epoch); - // data_handler.catch_stop_iteration(); - - foreach (var step in data_handler.steps()) - { - callbacks.on_test_batch_begin(step); - logs = test_function(data_handler, iterator); - var end_step = step + data_handler.StepIncrement; - if (is_val == false) - callbacks.on_test_batch_end(end_step, logs); - } - } - - var results = new Dictionary(); - foreach (var log in logs) - { - results[log.Key] = log.Value; - } - return results; + return evaluate(callbacks, data_handler, is_val); } @@ -150,51 +136,8 @@ public Dictionary evaluate(IDatasetV2 x, int verbose = 1, bool is Verbose = verbose, Steps = data_handler.Inferredsteps }); - callbacks.on_test_begin(); - - Dictionary logs = null; - foreach (var (epoch, iterator) in data_handler.enumerate_epochs()) - { - reset_metrics(); - callbacks.on_epoch_begin(epoch); - // data_handler.catch_stop_iteration(); - - foreach (var step in data_handler.steps()) - { - callbacks.on_test_batch_begin(step); - logs = test_function(data_handler, iterator); - var end_step = step + data_handler.StepIncrement; - if (is_val == false) - 
callbacks.on_test_batch_end(end_step, logs); - } - } - - var results = new Dictionary(); - foreach (var log in logs) - { - results[log.Key] = log.Value; - } - return results; - } - - Dictionary test_function(DataHandler data_handler, OwnedIterator iterator) - { - var data = iterator.next(); - var x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount; - var outputs = train_step(data_handler, new Tensors(data.Take(x_size)), new Tensors(data.Skip(x_size))); - tf_with(ops.control_dependencies(new object[0]), ctl => _test_counter.assign_add(1)); - return outputs; - } - - Dictionary test_step(DataHandler data_handler, Tensor x, Tensor y) - { - (x, y) = data_handler.DataAdapter.Expand1d(x, y); - var y_pred = Apply(x, training: false); - var loss = compiled_loss.Call(y, y_pred); - - compiled_metrics.update_state(y, y_pred); - return metrics.Select(x => (x.Name, x.result())).ToDictionary(x=>x.Item1, x=>(float)x.Item2); + return evaluate(callbacks, data_handler, is_val); } } -} +} \ No newline at end of file From 0effee430c905f7ee84a064a4b1474ef931368a0 Mon Sep 17 00:00:00 2001 From: Luc Bologna Date: Mon, 5 Jun 2023 20:14:57 +0200 Subject: [PATCH 046/182] Update Model.Evaluate.cs Fix my bad: Bad handling between test_function and test_step_multi_inputs_function. --- .../Engine/Model.Evaluate.cs | 116 +++++++++++------- 1 file changed, 75 insertions(+), 41 deletions(-) diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index 85c262a9c..99a891c0b 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -1,51 +1,19 @@ -using Tensorflow.NumPy; using System; using System.Collections.Generic; using System.Linq; +using Tensorflow; using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Keras.Callbacks; using Tensorflow.Keras.Engine.DataAdapters; -using static Tensorflow.Binding; using Tensorflow.Keras.Layers; using Tensorflow.Keras.Utils; -using Tensorflow; -using Tensorflow.Keras.Callbacks; +using Tensorflow.NumPy; +using static Tensorflow.Binding; namespace Tensorflow.Keras.Engine { public partial class Model { - protected Dictionary evaluate(CallbackList callbacks, DataHandler data_handler, bool is_val) - { - callbacks.on_test_begin(); - - //Dictionary? logs = null; - var logs = new Dictionary(); - int x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount; - foreach (var (epoch, iterator) in data_handler.enumerate_epochs()) - { - reset_metrics(); - callbacks.on_epoch_begin(epoch); - // data_handler.catch_stop_iteration(); - - foreach (var step in data_handler.steps()) - { - callbacks.on_test_batch_begin(step); - - var data = iterator.next(); - - logs = train_step(data_handler, new Tensors(data.Take(x_size)), new Tensors(data.Skip(x_size))); - tf_with(ops.control_dependencies(Array.Empty()), ctl => _test_counter.assign_add(1)); - - var end_step = step + data_handler.StepIncrement; - - if (!is_val) - callbacks.on_test_batch_end(end_step, logs); - } - } - - return logs; - } - /// /// Returns the loss value & metrics values for the model in test mode. 
/// @@ -97,7 +65,7 @@ public Dictionary evaluate(Tensor x, Tensor y, Steps = data_handler.Inferredsteps }); - return evaluate(callbacks, data_handler, is_val); + return evaluate(data_handler, callbacks, is_val, test_function); } public Dictionary evaluate(IEnumerable x, Tensor y, int verbose = 1, bool is_val = false) @@ -117,10 +85,9 @@ public Dictionary evaluate(IEnumerable x, Tensor y, int v Steps = data_handler.Inferredsteps }); - return evaluate(callbacks, data_handler, is_val); + return evaluate(data_handler, callbacks, is_val, test_step_multi_inputs_function); } - public Dictionary evaluate(IDatasetV2 x, int verbose = 1, bool is_val = false) { var data_handler = new DataHandler(new DataHandlerArgs @@ -137,7 +104,74 @@ public Dictionary evaluate(IDatasetV2 x, int verbose = 1, bool is Steps = data_handler.Inferredsteps }); - return evaluate(callbacks, data_handler, is_val); + return evaluate(data_handler, callbacks, is_val, test_function); + } + + /// + /// Internal bare implementation of evaluate function. + /// + /// Interations handling objects + /// + /// The function to be called on each batch of data. + /// Whether it is validation or test. + /// + Dictionary evaluate(DataHandler data_handler, CallbackList callbacks, bool is_val, Func> test_func) + { + callbacks.on_test_begin(); + + var results = new Dictionary(); + var logs = results; + foreach (var (epoch, iterator) in data_handler.enumerate_epochs()) + { + reset_metrics(); + callbacks.on_epoch_begin(epoch); + // data_handler.catch_stop_iteration(); + + foreach (var step in data_handler.steps()) + { + callbacks.on_test_batch_begin(step); + + var data = iterator.next(); + + logs = test_func(data_handler, iterator.next()); + + tf_with(ops.control_dependencies(Array.Empty()), ctl => _train_counter.assign_add(1)); + + var end_step = step + data_handler.StepIncrement; + if (!is_val) + callbacks.on_test_batch_end(end_step, logs); + } + + if (!is_val) + callbacks.on_epoch_end(epoch, logs); + } + + foreach (var log in logs) + { + results[log.Key] = log.Value; + } + + return results; + } + + Dictionary test_function(DataHandler data_handler, Tensor[] data) + { + var (x, y) = data_handler.DataAdapter.Expand1d(data[0], data[1]); + + var y_pred = Apply(x, training: false); + var loss = compiled_loss.Call(y, y_pred); + + compiled_metrics.update_state(y, y_pred); + + var outputs = metrics.Select(x => (x.Name, x.result())).ToDictionary(x => x.Name, x => (float)x.Item2); + return outputs; + } + + Dictionary test_step_multi_inputs_function(DataHandler data_handler, Tensor[] data) + { + var x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount; + var outputs = train_step(data_handler, new Tensors(data.Take(x_size)), new Tensors(data.Skip(x_size))); + return outputs; } } -} \ No newline at end of file +} From a8288af655d966e09484e04fc5c0cd6cf00ef0f7 Mon Sep 17 00:00:00 2001 From: Luc Bologna Date: Mon, 5 Jun 2023 21:15:57 +0200 Subject: [PATCH 047/182] Update Model.Evaluate.cs --- src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index 99a891c0b..912f5e06d 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -131,8 +131,6 @@ Dictionary evaluate(DataHandler data_handler, CallbackList callba { callbacks.on_test_batch_begin(step); - var data = iterator.next(); - logs = test_func(data_handler, iterator.next()); 
tf_with(ops.control_dependencies(Array.Empty()), ctl => _train_counter.assign_add(1)); From e1ece662643ac4daa98c3390f4a1d790dcff5270 Mon Sep 17 00:00:00 2001 From: Luc BOLOGNA Date: Sat, 17 Jun 2023 22:24:48 +0200 Subject: [PATCH 048/182] Refactor: remove useless unsafe on tensor implicit cast --- src/TensorFlowNET.Core/Tensors/Tensors.cs | 24 +++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/TensorFlowNET.Core/Tensors/Tensors.cs b/src/TensorFlowNET.Core/Tensors/Tensors.cs index d063ee39f..8d382d619 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensors.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensors.cs @@ -90,73 +90,73 @@ public T[] ToArray() where T: unmanaged } #region Explicit Conversions - public unsafe static explicit operator bool(Tensors tensor) + public static explicit operator bool(Tensors tensor) { EnsureSingleTensor(tensor, "explicit conversion to bool"); return (bool)tensor[0]; } - public unsafe static explicit operator sbyte(Tensors tensor) + public static explicit operator sbyte(Tensors tensor) { EnsureSingleTensor(tensor, "explicit conversion to sbyte"); return (sbyte)tensor[0]; } - public unsafe static explicit operator byte(Tensors tensor) + public static explicit operator byte(Tensors tensor) { EnsureSingleTensor(tensor, "explicit conversion to byte"); return (byte)tensor[0]; } - public unsafe static explicit operator ushort(Tensors tensor) + public static explicit operator ushort(Tensors tensor) { EnsureSingleTensor(tensor, "explicit conversion to ushort"); return (ushort)tensor[0]; } - public unsafe static explicit operator short(Tensors tensor) + public static explicit operator short(Tensors tensor) { EnsureSingleTensor(tensor, "explicit conversion to short"); return (short)tensor[0]; } - public unsafe static explicit operator int(Tensors tensor) + public static explicit operator int(Tensors tensor) { EnsureSingleTensor(tensor, "explicit conversion to int"); return (int)tensor[0]; } - public unsafe static explicit operator uint(Tensors tensor) + public static explicit operator uint(Tensors tensor) { EnsureSingleTensor(tensor, "explicit conversion to uint"); return (uint)tensor[0]; } - public unsafe static explicit operator long(Tensors tensor) + public static explicit operator long(Tensors tensor) { EnsureSingleTensor(tensor, "explicit conversion to long"); return (long)tensor[0]; } - public unsafe static explicit operator ulong(Tensors tensor) + public static explicit operator ulong(Tensors tensor) { EnsureSingleTensor(tensor, "explicit conversion to ulong"); return (ulong)tensor[0]; } - public unsafe static explicit operator float(Tensors tensor) + public static explicit operator float(Tensors tensor) { EnsureSingleTensor(tensor, "explicit conversion to byte"); return (byte)tensor[0]; } - public unsafe static explicit operator double(Tensors tensor) + public static explicit operator double(Tensors tensor) { EnsureSingleTensor(tensor, "explicit conversion to double"); return (double)tensor[0]; } - public unsafe static explicit operator string(Tensors tensor) + public static explicit operator string(Tensors tensor) { EnsureSingleTensor(tensor, "explicit conversion to string"); return (string)tensor[0]; From 35d2e107f325dc0070cde780a9f8d491cfe2c4f8 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Sun, 18 Jun 2023 12:15:56 +0800 Subject: [PATCH 049/182] refactor model.evaluate to deal with confilict --- src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) 
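The resolution below materializes the IEnumerable<Tensor> inputs with ToArray() before wrapping them in Tensors, presumably so the array-based Tensors constructor is picked. A minimal sketch of the pattern (variable names and shapes are illustrative only):

    var xs = new List<Tensor> { tf.ones((2, 3)), tf.ones((2, 4)) };   // any enumerable of tensors
    var inputs = new Tensors(xs.ToArray());                           // was: new Tensors(xs)
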
diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index 912f5e06d..eaa9eb23c 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -72,7 +72,7 @@ public Dictionary evaluate(IEnumerable x, Tensor y, int v { var data_handler = new DataHandler(new DataHandlerArgs { - X = new Tensors(x), + X = new Tensors(x.ToArray()), Y = y, Model = this, StepsPerExecution = _steps_per_execution @@ -168,7 +168,8 @@ Dictionary test_function(DataHandler data_handler, Tensor[] data) Dictionary test_step_multi_inputs_function(DataHandler data_handler, Tensor[] data) { var x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount; - var outputs = train_step(data_handler, new Tensors(data.Take(x_size)), new Tensors(data.Skip(x_size))); + var outputs = train_step(data_handler, new Tensors(data.Take(x_size).ToArray()), new Tensors(data.Skip(x_size).ToArray())); + tf_with(ops.control_dependencies(new object[0]), ctl => _train_counter.assign_add(1)); return outputs; } } From 1b1a50371b0829363d1f9c469aedbe727a6ec41f Mon Sep 17 00:00:00 2001 From: Visagan Guruparan <103048@smsassist.com> Date: Sun, 18 Jun 2023 22:46:36 -0500 Subject: [PATCH 050/182] np update square and dot product --- src/TensorFlowNET.Core/APIs/tf.math.cs | 15 ++++++++-- src/TensorFlowNET.Core/Binding.Util.cs | 23 ++++++++++++++- src/TensorFlowNET.Core/NumPy/Numpy.Math.cs | 21 ++++++++++++++ .../TensorFlowNET.UnitTest/Numpy/Math.Test.cs | 29 ++++++++++++++++++- 4 files changed, 84 insertions(+), 4 deletions(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs index 75253700a..0e53d938a 100644 --- a/src/TensorFlowNET.Core/APIs/tf.math.cs +++ b/src/TensorFlowNET.Core/APIs/tf.math.cs @@ -14,6 +14,7 @@ You may obtain a copy of the License at limitations under the License. 
******************************************************************************/ +using Tensorflow.NumPy; using Tensorflow.Operations; namespace Tensorflow @@ -42,7 +43,6 @@ public Tensor erf(Tensor x, string name = null) public Tensor multiply(Tensor x, Tensor y, string name = null) => math_ops.multiply(x, y, name: name); - public Tensor divide_no_nan(Tensor a, Tensor b, string name = null) => math_ops.div_no_nan(a, b); @@ -452,7 +452,18 @@ public Tensor multiply(Tensor x, Tensor y, string name = null) /// public Tensor multiply(Tx x, Ty y, string name = null) => gen_math_ops.mul(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); - + /// + /// return scalar product + /// + /// + /// + /// + /// + /// + /// + /// + public Tensor dot_prod(Tx x, Ty y, NDArray axes, string name = null) + => math_ops.tensordot(convert_to_tensor(x), convert_to_tensor(y), axes, name: name); public Tensor negative(Tensor x, string name = null) => gen_math_ops.neg(x, name); diff --git a/src/TensorFlowNET.Core/Binding.Util.cs b/src/TensorFlowNET.Core/Binding.Util.cs index 8df39334a..e414ef6e8 100644 --- a/src/TensorFlowNET.Core/Binding.Util.cs +++ b/src/TensorFlowNET.Core/Binding.Util.cs @@ -486,7 +486,28 @@ public static Shape GetShape(this object data) throw new NotImplementedException(""); } } - + public static NDArray GetFlattenArray(NDArray x) + { + switch (x.GetDataType()) + { + case TF_DataType.TF_FLOAT: + x = x.ToArray(); + break; + case TF_DataType.TF_DOUBLE: + x = x.ToArray(); + break; + case TF_DataType.TF_INT16: + case TF_DataType.TF_INT32: + x = x.ToArray(); + break; + case TF_DataType.TF_INT64: + x = x.ToArray(); + break; + default: + break; + } + return x; + } public static TF_DataType GetDataType(this object data) { var type = data.GetType(); diff --git a/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs b/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs index ea85048f8..5bc97952b 100644 --- a/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs +++ b/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs @@ -49,9 +49,30 @@ public static NDArray prod(NDArray array, Axis? axis = null, Type? dtype = null, [AutoNumPy] public static NDArray prod(params T[] array) where T : unmanaged => new NDArray(tf.reduce_prod(new NDArray(array))); + [AutoNumPy] + public static NDArray dot(NDArray x1, NDArray x2, NDArray? axes = null, string? 
name = null) + { + //if axes mentioned + if (axes != null) + { + return new NDArray(tf.dot_prod(x1, x2, axes, name)); + } + if (x1.shape.ndim > 1) + { + x1 = GetFlattenArray(x1); + } + if (x2.shape.ndim > 1) + { + x2 = GetFlattenArray(x2); + } + //if axes not mentioned, default 0,0 + return new NDArray(tf.dot_prod(x1, x2, axes: new int[] { 0, 0 }, name)); + } [AutoNumPy] public static NDArray power(NDArray x, NDArray y) => new NDArray(tf.pow(x, y)); + [AutoNumPy] + public static NDArray square(NDArray x) => new NDArray(tf.square(x)); [AutoNumPy] public static NDArray sin(NDArray x) => new NDArray(math_ops.sin(x)); diff --git a/test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs b/test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs index 32b517e4f..65cdaedd9 100644 --- a/test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs +++ b/test/TensorFlowNET.UnitTest/Numpy/Math.Test.cs @@ -65,7 +65,34 @@ public void power() var y = np.power(x, 3); Assert.AreEqual(y, new[] { 0, 1, 8, 27, 64, 125 }); } - [TestMethod] + [TestMethod] + public void square() + { + var x = np.arange(6); + var y = np.square(x); + Assert.AreEqual(y, new[] { 0, 1, 4, 9, 16, 25 }); + } + [TestMethod] + public void dotproduct() + { + var x1 = new NDArray(new[] { 1, 2, 3 }); + var x2 = new NDArray(new[] { 4, 5, 6 }); + double result1 = np.dot(x1, x2); + NDArray y1 = new float[,] { + { 1.0f, 2.0f, 3.0f }, + { 4.0f, 5.1f,6.0f }, + { 4.0f, 5.1f,6.0f } + }; + NDArray y2 = new float[,] { + { 3.0f, 2.0f, 1.0f }, + { 6.0f, 5.1f, 4.0f }, + { 6.0f, 5.1f, 4.0f } + }; + double result2 = np.dot(y1, y2); + Assert.AreEqual(result1, 32); + Assert.AreEqual(Math.Round(result2, 2), 158.02); + } + [TestMethod] public void maximum() { var x1 = new NDArray(new[,] { { 1, 2, 3 }, { 4, 5.1, 6 } }); From 51b5f17c9a17397d61d1dc7df460517940e1107b Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Wed, 21 Jun 2023 21:41:06 +0800 Subject: [PATCH 051/182] fix: RNN training error on linux. --- src/TensorFlowNET.Core/APIs/c_api.cs | 14 ++++------- .../APIs/c_api.customize.cs | 2 +- src/TensorFlowNET.Core/Eager/GraphOnlyOps.cs | 25 +++++++++++++++++++ src/TensorFlowNET.Core/Graphs/FuncGraph.cs | 12 ++++----- src/TensorFlowNET.Core/Operations/list_ops.cs | 2 +- src/TensorFlowNET.Core/Operations/while_v2.cs | 9 +++---- src/TensorFlowNET.Core/ops.cs | 3 ++- 7 files changed, 44 insertions(+), 23 deletions(-) create mode 100644 src/TensorFlowNET.Core/Eager/GraphOnlyOps.cs diff --git a/src/TensorFlowNET.Core/APIs/c_api.cs b/src/TensorFlowNET.Core/APIs/c_api.cs index 6049c95cc..d4744e789 100644 --- a/src/TensorFlowNET.Core/APIs/c_api.cs +++ b/src/TensorFlowNET.Core/APIs/c_api.cs @@ -51,17 +51,13 @@ public static string StringPiece(IntPtr handle) return handle == IntPtr.Zero ? String.Empty : Marshal.PtrToStringAnsi(handle); } - public unsafe static byte[] ByteStringPiece(IntPtr handle) + public unsafe static byte[] ByteStringPiece(Buffer? 
handle) { - byte* str_data = (byte*)handle.ToPointer(); - List bytes = new List(); - byte current = 255; - while (current != ((byte)'\0')) - { - current = *(str_data++); - bytes.Add(current); + if(handle is null){ + return new byte[0]; } - return bytes.Take(bytes.Count - 1).ToArray(); + var data = handle.ToArray(); + return data; } [UnmanagedFunctionPointer(CallingConvention.Winapi)] diff --git a/src/TensorFlowNET.Core/APIs/c_api.customize.cs b/src/TensorFlowNET.Core/APIs/c_api.customize.cs index d2aab9ac0..510e52eb7 100644 --- a/src/TensorFlowNET.Core/APIs/c_api.customize.cs +++ b/src/TensorFlowNET.Core/APIs/c_api.customize.cs @@ -10,7 +10,7 @@ public partial class c_api [DllImport(TensorFlowLibName)] public static extern void TFC_SetAttr(SafeGraphHandle graph, IntPtr op, string attr_name, SafeBufferHandle attr_value_proto, SafeStatusHandle status); [DllImport(TensorFlowLibName)] - public static extern IntPtr TFC_GetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output); + public static extern SafeBufferHandle TFC_GetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output); [DllImport(TensorFlowLibName)] public static extern void TFC_SetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output, byte[] data, long proto_len, SafeStatusHandle status); } diff --git a/src/TensorFlowNET.Core/Eager/GraphOnlyOps.cs b/src/TensorFlowNET.Core/Eager/GraphOnlyOps.cs new file mode 100644 index 000000000..2c20cfe9b --- /dev/null +++ b/src/TensorFlowNET.Core/Eager/GraphOnlyOps.cs @@ -0,0 +1,25 @@ +using Tensorflow; + +internal static class GraphOnlyOps +{ + /// + /// Graph-only version of tf.compat.v1.placeholder(), for internal use only. + /// + /// + /// + /// + /// + internal static Tensor graph_placeholder(TF_DataType dtype, Shape shape, string? name = null) + { + var dtype_value = new AttrValue() { Type = dtype.as_datatype_enum() }; + var shape_value = new AttrValue() { Shape = shape.as_proto() }; + var g = ops.get_default_graph(); + Dictionary attrs = new(); + attrs["dtype"] = dtype_value; + attrs["shape"] = shape_value; + var op = g.create_op("Placeholder", new Tensor[0], new TF_DataType[] { dtype }, + new TF_DataType[0], attrs: attrs, name: name); + var result = op.outputs[0]; + return result; + } +} \ No newline at end of file diff --git a/src/TensorFlowNET.Core/Graphs/FuncGraph.cs b/src/TensorFlowNET.Core/Graphs/FuncGraph.cs index ba7d7068e..6f7fa9c5f 100644 --- a/src/TensorFlowNET.Core/Graphs/FuncGraph.cs +++ b/src/TensorFlowNET.Core/Graphs/FuncGraph.cs @@ -544,12 +544,12 @@ private static object _get_defun_input(object arg, string name) Tensor placeholder; try { - placeholder = tf.placeholder(tensor.dtype, tensor.shape, name); + placeholder = GraphOnlyOps.graph_placeholder(tensor.dtype, tensor.shape, name); } - catch (ValueError) + catch (ValueError ex) { - // TODO(Rinne): Add warning here. - placeholder = tf.placeholder(tensor.dtype, tensor.shape); + tf.Logger.Warning(ex.ToString()); + placeholder = GraphOnlyOps.graph_placeholder(tensor.dtype, tensor.shape); } handle_data_util.copy_handle_data(tensor, placeholder); if (name is not null) @@ -575,12 +575,12 @@ private static object _get_defun_input(object arg, string name) Tensor placeholder; try { - placeholder = tf.placeholder(spec.dtype, spec.shape, requested_name); + placeholder = GraphOnlyOps.graph_placeholder(spec.dtype, spec.shape, requested_name); } catch (ValueError) { // TODO(Rinne): Add warning here. 
- placeholder = tf.placeholder(spec.dtype, spec.shape); + placeholder = GraphOnlyOps.graph_placeholder(spec.dtype, spec.shape); } if (name is not null) { diff --git a/src/TensorFlowNET.Core/Operations/list_ops.cs b/src/TensorFlowNET.Core/Operations/list_ops.cs index c5e83ee41..3791a2c19 100644 --- a/src/TensorFlowNET.Core/Operations/list_ops.cs +++ b/src/TensorFlowNET.Core/Operations/list_ops.cs @@ -31,7 +31,7 @@ private static Tensor _build_element_shape(Shape? shape) } else { - return ops.convert_to_tensor(shape); + return ops.convert_to_tensor(shape, dtype: dtypes.int32); } } diff --git a/src/TensorFlowNET.Core/Operations/while_v2.cs b/src/TensorFlowNET.Core/Operations/while_v2.cs index 3f324f872..aae15b77d 100644 --- a/src/TensorFlowNET.Core/Operations/while_v2.cs +++ b/src/TensorFlowNET.Core/Operations/while_v2.cs @@ -38,9 +38,9 @@ public static Tensor[] while_loop(Func cond, int len_orig_loop_vars = orig_loop_vars.Length; loop_vars = _tensor_array_to_flow(loop_vars); - loop_vars = Nest.MapStructure(x => _convert_to_tensor_or_indexed_slices(x, TF_DataType.DtInvalid, null), loop_vars).ToTensors(); + loop_vars = Nest.MapStructure(x => _convert_to_tensor_or_indexed_slices(x), loop_vars).ToTensors(); - var loop_vars_signature = Nest.MapStructure(x => new TensorSpec(x.shape, x.dtype), _tensor_array_to_flow(loop_vars)); + var loop_vars_signature = Nest.MapStructure(x => new TensorSpec(x.shape, x.dtype), loop_vars); var flat_shape_invariants = Nest.Flatten(loop_vars_signature).Select(x => x.shape).ToArray(); @@ -379,10 +379,9 @@ private static string _build_cond_placeholders_name_prefix(FuncGraph cond_graph) return cond_graph.unique_name(cond_graph.Name + "___redundant_placeholder"); } - private static Tensor _convert_to_tensor_or_indexed_slices(Tensor value, TF_DataType dtype, - string name) + private static Tensor _convert_to_tensor_or_indexed_slices(Tensor value) { - return ops.convert_to_tensor(value, dtype, name, false); + return ops.convert_to_tensor(value, as_ref: false); } private static Tensor _build_maximum_iterations_loop_var(int maximum_iterations = -1) diff --git a/src/TensorFlowNET.Core/ops.cs b/src/TensorFlowNET.Core/ops.cs index fb9bccf31..a962e6d87 100644 --- a/src/TensorFlowNET.Core/ops.cs +++ b/src/TensorFlowNET.Core/ops.cs @@ -576,7 +576,8 @@ public static bool inside_function() public static HandleData get_resource_handle_data(Tensor graph_op) { var handle_data = c_api.TFC_GetHandleShapeAndType(graph_op.graph.c_graph, graph_op._as_tf_output()); - return HandleData.Parser.ParseFrom(c_api.ByteStringPiece(handle_data)); + var handle_str = c_api.ByteStringPiece(handle_data.DangerousGetHandle() == IntPtr.Zero ? null : new Buffer(handle_data)); + return HandleData.Parser.ParseFrom(handle_str); } public static void dismantle_graph(Graph graph) From 69b3bce3309d62b26d91614a1e2430ff0e5b183c Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Thu, 22 Jun 2023 02:07:10 +0800 Subject: [PATCH 052/182] test: update the redist version of test. 
--- .../Tensorflow.UnitTest.RedistHolder.csproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj b/tools/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj index 878077582..1ca387dbb 100644 --- a/tools/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj +++ b/tools/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj @@ -5,7 +5,7 @@ - + From 46e216279747397507f833e765843467c6f35e40 Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Wed, 21 Jun 2023 17:25:17 -0500 Subject: [PATCH 053/182] Fix model.evaluate in NeuralNetXorKeras. --- src/TensorFlowNET.Core/APIs/c_api.cs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/TensorFlowNET.Core/APIs/c_api.cs b/src/TensorFlowNET.Core/APIs/c_api.cs index 6049c95cc..63bdfd27d 100644 --- a/src/TensorFlowNET.Core/APIs/c_api.cs +++ b/src/TensorFlowNET.Core/APIs/c_api.cs @@ -53,6 +53,11 @@ public static string StringPiece(IntPtr handle) public unsafe static byte[] ByteStringPiece(IntPtr handle) { + if (handle == IntPtr.Zero) + { + return new byte[0]; + } + byte* str_data = (byte*)handle.ToPointer(); List bytes = new List(); byte current = 255; From ae8fe840e457b0b34d04fc0cafdb31d89b7a9d4d Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Thu, 22 Jun 2023 09:21:18 +0800 Subject: [PATCH 054/182] fix: resolve conflict. --- src/TensorFlowNET.Core/APIs/c_api.cs | 4 +++- src/TensorFlowNET.Core/ops.cs | 10 ++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/TensorFlowNET.Core/APIs/c_api.cs b/src/TensorFlowNET.Core/APIs/c_api.cs index 559176a54..a91b86827 100644 --- a/src/TensorFlowNET.Core/APIs/c_api.cs +++ b/src/TensorFlowNET.Core/APIs/c_api.cs @@ -53,8 +53,10 @@ public static string StringPiece(IntPtr handle) public unsafe static byte[] ByteStringPiece(Buffer? handle) { - if(handle is null){ + if (handle is null) + { return new byte[0]; + } var data = handle.ToArray(); return data; } diff --git a/src/TensorFlowNET.Core/ops.cs b/src/TensorFlowNET.Core/ops.cs index a962e6d87..7bd78a79f 100644 --- a/src/TensorFlowNET.Core/ops.cs +++ b/src/TensorFlowNET.Core/ops.cs @@ -576,8 +576,14 @@ public static bool inside_function() public static HandleData get_resource_handle_data(Tensor graph_op) { var handle_data = c_api.TFC_GetHandleShapeAndType(graph_op.graph.c_graph, graph_op._as_tf_output()); - var handle_str = c_api.ByteStringPiece(handle_data.DangerousGetHandle() == IntPtr.Zero ? null : new Buffer(handle_data)); - return HandleData.Parser.ParseFrom(handle_str); + try{ + var handle_str = c_api.ByteStringPiece(handle_data.DangerousGetHandle() == IntPtr.Zero ? null : new Buffer(handle_data)); + return HandleData.Parser.ParseFrom(handle_str); + } + catch(Exception){ + var handle_str = c_api.ByteStringPieceFromNativeString(handle_data.DangerousGetHandle()); + return HandleData.Parser.ParseFrom(handle_str); + } } public static void dismantle_graph(Graph graph) From 4c6063d03e3bb8af35007c16ca2585c772994301 Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Wed, 21 Jun 2023 21:05:51 -0500 Subject: [PATCH 055/182] Update version number. 
--- src/TensorFlowNET.Core/Tensorflow.Binding.csproj | 10 +++++----- src/TensorFlowNET.Keras/Tensorflow.Keras.csproj | 8 ++++---- test/TensorflowNET.Hub.Unittest/KerasLayerTest.cs | 1 + 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj index 02578ec18..61b86168e 100644 --- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj +++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj @@ -5,7 +5,7 @@ Tensorflow.Binding Tensorflow 2.10.0 - 0.100.5 + 0.110.0 10.0 enable Haiping Chen, Meinrad Recheis, Eli Belash @@ -20,7 +20,7 @@ Google's TensorFlow full binding in .NET Standard. Building, training and infering deep learning models. https://tensorflownet.readthedocs.io - 0.100.5.0 + 0.110.0.0 tf.net 0.100.x and above are based on tensorflow native 2.10.0 @@ -38,7 +38,7 @@ https://tensorflownet.readthedocs.io tf.net 0.7x.x aligns with TensorFlow v2.7.x native library. tf.net 0.10x.x aligns with TensorFlow v2.10.x native library. - 0.100.5.0 + 0.110.0.0 LICENSE true packages @@ -110,13 +110,13 @@ https://tensorflownet.readthedocs.io - + - + diff --git a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj index 8b3c92655..320c3b679 100644 --- a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj +++ b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj @@ -7,7 +7,7 @@ enable Tensorflow.Keras AnyCPU;x64 - 0.10.5 + 0.11.0 Haiping Chen Keras for .NET Apache 2.0, Haiping Chen 2023 @@ -38,8 +38,8 @@ Keras is an API designed for human beings, not machines. Keras follows best prac Git true Open.snk - 0.10.5.0 - 0.10.5.0 + 0.11.0.0 + 0.11.0.0 LICENSE Debug;Release;GPU @@ -71,7 +71,7 @@ Keras is an API designed for human beings, not machines. Keras follows best prac - + diff --git a/test/TensorflowNET.Hub.Unittest/KerasLayerTest.cs b/test/TensorflowNET.Hub.Unittest/KerasLayerTest.cs index 4ee4d54c4..b9a8ed804 100644 --- a/test/TensorflowNET.Hub.Unittest/KerasLayerTest.cs +++ b/test/TensorflowNET.Hub.Unittest/KerasLayerTest.cs @@ -6,6 +6,7 @@ namespace Tensorflow.Hub.Unittest [TestClass] public class KerasLayerTest { + [Ignore] [TestMethod] public void SmallBert() { From 3805771121162c3e0806198acd18619c6cd6394b Mon Sep 17 00:00:00 2001 From: Beacontownfc <19636977267@qq.com> Date: Thu, 22 Jun 2023 05:53:10 +0000 Subject: [PATCH 056/182] improve layer norm --- src/TensorFlowNET.Core/APIs/tf.nn.cs | 18 +++++++++++++++ .../Normalization/LayerNormalization.cs | 15 ++++++++++++- .../Layers/LayersTest.cs | 22 +++++++++++++++++++ 3 files changed, 54 insertions(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs index e0c29bfa7..08b88c3d6 100644 --- a/src/TensorFlowNET.Core/APIs/tf.nn.cs +++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs @@ -14,8 +14,10 @@ You may obtain a copy of the License at limitations under the License. 
******************************************************************************/ +using System.Xml.Linq; using Tensorflow.Operations; using Tensorflow.Operations.Activation; +//using static System.Formats.Asn1.AsnWriter; using static Tensorflow.Binding; namespace Tensorflow @@ -125,6 +127,22 @@ public Tensor[] fused_batch_norm(Tensor x, is_training: is_training, name: name, exponential_avg_factor: exponential_avg_factor); + public Tensor batch_normalization(Tensor x, + Tensor mean, + Tensor variance, + Tensor offset, + Tensor scale, + float variance_epsilon, + string name = null) + { + var inv = math_ops.rsqrt(variance + variance_epsilon); + tf_with(ops.name_scope(name, "batchnorm", (x, mean, variance, scale, offset)), scope => + { + if (scale != null) inv *= scale; + }); + if (offset != null) return x * math_ops.cast(inv, x.dtype) + math_ops.cast(offset - mean * inv, dtype: x.dtype); + else return x * math_ops.cast(inv, x.dtype) + math_ops.cast(-mean * inv, dtype: x.dtype); + } public Tensor max_pool(Tensor value, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string name = null) => nn_ops.max_pool(value, ksize, strides, padding, data_format: data_format, name: name); diff --git a/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs b/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs index 1898f24c8..69bdfbaa0 100644 --- a/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs +++ b/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs @@ -153,9 +153,22 @@ protected override Tensors Call(Tensors inputs, Tensors state = null, bool? trai } else { + var input_dtype = inputs.dtype; + if ((input_dtype == tf.float16) && DType == tf.float32) inputs = tf.cast(inputs, tf.float32); + (Tensor mean, Tensor variance) = tf.nn.moments(inputs, axis, keep_dims: true); - } + (Tensor scale, Tensor offset) = (_broadcast(gamma), _broadcast(beta)); + + outputs = tf.nn.batch_normalization( + inputs, + mean, + variance, + offset: offset, + scale: scale, + variance_epsilon: epsilon); + outputs = tf.cast(outputs, input_dtype); + } // If some components of the shape got lost due to adjustments, fix that. 
outputs.shape = input_shape; diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs index f4980b82d..98d909668 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs @@ -1,5 +1,7 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; using System.Collections.Generic; +using System.Linq; using Tensorflow.NumPy; using static Tensorflow.Binding; using static Tensorflow.KerasApi; @@ -161,6 +163,26 @@ public void LayerNormalization() Tensor output = layer.Apply(inputs); Assert.AreEqual((5, 2), output.shape); Assert.IsTrue(output[0].numpy().Equals(new[] { -0.99998f, 0.99998f })); + + // test_layernorm_weights + Assert.AreEqual(len(layer.TrainableWeights), 2); + Assert.AreEqual(len(layer.Weights), 2); + + var beta = layer.Weights.Where(x => x.Name.StartsWith("beta")).Single(); + var gamma = layer.Weights.Where(x => x.Name.StartsWith("gamma")).Single(); + + // correctness_test + layer = keras.layers.LayerNormalization(axis: -1, epsilon: (float) 1e-12); + var x = np.random.normal(loc: 5.0f, scale: 10.0f, size: (1000, 2, 2, 2)).astype(tf.float32); + + output = layer.Apply(x); + + var y = (output - beta.numpy()) / gamma.numpy(); + + var y_mean = np.mean(y.numpy()); + var y_std = np.sqrt(np.sum(np.power(y.numpy() - np.mean(y.numpy()), 2)) / 8000); + Assert.IsTrue(tf.greater(np.array(0.1f), tf.abs(y_std - 1.0)).ToArray()[0]); + Assert.IsTrue(tf.greater(np.array(0.1f), tf.abs(y_mean)).ToArray()[0]); } /// From 786b26602ff502284f56d85586961fb9f824cc22 Mon Sep 17 00:00:00 2001 From: Beacontownfc <19636977267@qq.com> Date: Thu, 22 Jun 2023 07:15:08 +0000 Subject: [PATCH 057/182] Modify according to the reviewer's comments --- src/TensorFlowNET.Core/APIs/tf.nn.cs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs index 08b88c3d6..e5cd4e569 100644 --- a/src/TensorFlowNET.Core/APIs/tf.nn.cs +++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs @@ -17,7 +17,6 @@ limitations under the License. using System.Xml.Linq; using Tensorflow.Operations; using Tensorflow.Operations.Activation; -//using static System.Formats.Asn1.AsnWriter; using static Tensorflow.Binding; namespace Tensorflow @@ -127,6 +126,18 @@ public Tensor[] fused_batch_norm(Tensor x, is_training: is_training, name: name, exponential_avg_factor: exponential_avg_factor); + + /// + /// Normalizes a tensor by `mean` and `variance`, and applies (optionally) a`scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\). + /// + /// A floating point tensor. + /// A mean `Tensor`. + /// A variance `Tensor`. + /// An offset `Tensor`, often denoted \\(\beta\\) in equations, or NULL. If present, will be added to the normalized tensor. + /// A scale `Tensor`, often denoted \\(\gamma\\) in equations, or NULL. If present, the scale is applied to the normalized tensor. + /// A small float number to avoid dividing by 0. + /// A name for this operation. + /// the normalized, scaled, offset tensor. 
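/// A minimal usage sketch of this helper, mirroring how the LayerNormalization change
/// above calls it (here `x`, `axis`, `gamma` and `beta` are assumed caller-provided values):
///   (Tensor mean, Tensor variance) = tf.nn.moments(x, axis, keep_dims: true);
///   var y = tf.nn.batch_normalization(x, mean, variance,
///       offset: beta, scale: gamma, variance_epsilon: 1e-3f);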
public Tensor batch_normalization(Tensor x, Tensor mean, Tensor variance, From aac52940ade5c788bc7d8d6949da718b63293dc1 Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Fri, 23 Jun 2023 13:17:46 +0800 Subject: [PATCH 058/182] init pickle support to np.load object type of npy --- .../NumPy/DtypeConstructor.cs | 40 ++++++++++++ .../Implementation/NumPyImpl.Creation.cs | 18 +++++- .../NumPy/Implementation/NumPyImpl.load.cs | 22 +++++-- .../NumPy/MultiArrayConstructor.cs | 44 +++++++++++++ .../NumPy/NDArray.Pickle.cs | 19 ++++++ .../Tensorflow.Binding.csproj | 1 + src/TensorFlowNET.Keras/Datasets/Imdb.cs | 63 +++++++++++++++++-- .../Dataset/DatasetTest.cs | 17 +++++ 8 files changed, 215 insertions(+), 9 deletions(-) create mode 100644 src/TensorFlowNET.Core/NumPy/DtypeConstructor.cs create mode 100644 src/TensorFlowNET.Core/NumPy/MultiArrayConstructor.cs create mode 100644 src/TensorFlowNET.Core/NumPy/NDArray.Pickle.cs diff --git a/src/TensorFlowNET.Core/NumPy/DtypeConstructor.cs b/src/TensorFlowNET.Core/NumPy/DtypeConstructor.cs new file mode 100644 index 000000000..f84f408e1 --- /dev/null +++ b/src/TensorFlowNET.Core/NumPy/DtypeConstructor.cs @@ -0,0 +1,40 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Text; +using Razorvine.Pickle; + +namespace Tensorflow.NumPy +{ + /// + /// + /// + [SuppressMessage("ReSharper", "InconsistentNaming")] + [SuppressMessage("ReSharper", "MemberCanBePrivate.Global")] + [SuppressMessage("ReSharper", "MemberCanBeMadeStatic.Global")] + class DtypeConstructor : IObjectConstructor + { + public object construct(object[] args) + { + Console.WriteLine("DtypeConstructor"); + Console.WriteLine(args.Length); + for (int i = 0; i < args.Length; i++) + { + Console.WriteLine(args[i]); + } + return new demo(); + } + } + class demo + { + public void __setstate__(object[] args) + { + Console.WriteLine("demo __setstate__"); + Console.WriteLine(args.Length); + for (int i = 0; i < args.Length; i++) + { + Console.WriteLine(args[i]); + } + } + } +} diff --git a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs index f29879b0f..80b62198a 100644 --- a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs +++ b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs @@ -4,6 +4,7 @@ using System.Linq; using System.Text; using Tensorflow.Util; +using Razorvine.Pickle; using static Tensorflow.Binding; namespace Tensorflow.NumPy @@ -93,10 +94,25 @@ Array ReadValueMatrix(BinaryReader reader, Array matrix, int bytes, Type type, i var buffer = reader.ReadBytes(bytes * total); System.Buffer.BlockCopy(buffer, 0, matrix, 0, buffer.Length); - return matrix; } + NDArray ReadObjectMatrix(BinaryReader reader, Array matrix, int[] shape) + { + //int data = reader.ReadByte(); + //Console.WriteLine(data); + //Console.WriteLine(reader.ReadByte()); + Stream stream = reader.BaseStream; + Unpickler.registerConstructor("numpy.core.multiarray", "_reconstruct", new MultiArrayConstructor()); + Unpickler.registerConstructor("numpy", "dtype", new DtypeConstructor()); + + var unpickler = new Unpickler(); + + NDArray result = (NDArray) unpickler.load(stream); + Console.WriteLine(result.dims); + return result; + } + public (NDArray, NDArray) meshgrid(T[] array, bool copy = true, bool sparse = false) { var tensors = array_ops.meshgrid(array, copy: copy, sparse: sparse); diff --git a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs 
b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs index 05f53d5e7..789f119a1 100644 --- a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs +++ b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs @@ -27,9 +27,20 @@ public Array LoadMatrix(Stream stream) Array matrix = Array.CreateInstance(type, shape); //if (type == typeof(String)) - //return ReadStringMatrix(reader, matrix, bytes, type, shape); + //return ReadStringMatrix(reader, matrix, bytes, type, shape); + NDArray res = ReadObjectMatrix(reader, matrix, shape); + Console.WriteLine("LoadMatrix"); + Console.WriteLine(res.dims[0]); + Console.WriteLine((int)res[0][0]); + Console.WriteLine(res.dims[1]); + //if (type == typeof(Object)) + //{ + + //} + //else return ReadValueMatrix(reader, matrix, bytes, type, shape); } + } public T Load(Stream stream) @@ -37,7 +48,7 @@ public T Load(Stream stream) ICloneable, IList, ICollection, IEnumerable, IStructuralComparable, IStructuralEquatable { // if (typeof(T).IsArray && (typeof(T).GetElementType().IsArray || typeof(T).GetElementType() == typeof(string))) - // return LoadJagged(stream) as T; + // return LoadJagged(stream) as T; return LoadMatrix(stream) as T; } @@ -48,7 +59,7 @@ bool ParseReader(BinaryReader reader, out int bytes, out Type t, out int[] shape shape = null; // The first 6 bytes are a magic string: exactly "x93NUMPY" - if (reader.ReadChar() != 63) return false; + if (reader.ReadByte() != 0x93) return false; if (reader.ReadChar() != 'N') return false; if (reader.ReadChar() != 'U') return false; if (reader.ReadChar() != 'M') return false; @@ -64,6 +75,7 @@ bool ParseReader(BinaryReader reader, out int bytes, out Type t, out int[] shape ushort len = reader.ReadUInt16(); string header = new String(reader.ReadChars(len)); + Console.WriteLine(header); string mark = "'descr': '"; int s = header.IndexOf(mark) + mark.Length; int e = header.IndexOf("'", s + 1); @@ -93,7 +105,7 @@ bool ParseReader(BinaryReader reader, out int bytes, out Type t, out int[] shape Type GetType(string dtype, out int bytes, out bool? isLittleEndian) { isLittleEndian = IsLittleEndian(dtype); - bytes = Int32.Parse(dtype.Substring(2)); + bytes = dtype.Length > 2 ? Int32.Parse(dtype.Substring(2)) : 0; string typeCode = dtype.Substring(1); @@ -121,6 +133,8 @@ Type GetType(string dtype, out int bytes, out bool? isLittleEndian) return typeof(Double); if (typeCode.StartsWith("S")) return typeof(String); + if (typeCode == "O") + return typeof(Object); throw new NotSupportedException(); } diff --git a/src/TensorFlowNET.Core/NumPy/MultiArrayConstructor.cs b/src/TensorFlowNET.Core/NumPy/MultiArrayConstructor.cs new file mode 100644 index 000000000..92927cd5a --- /dev/null +++ b/src/TensorFlowNET.Core/NumPy/MultiArrayConstructor.cs @@ -0,0 +1,44 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Text; +using Razorvine.Pickle; + +namespace Tensorflow.NumPy +{ + /// + /// Creates multiarrays of objects. Returns a primitive type multiarray such as int[][] if + /// the objects are ints, etc. 
+ /// + [SuppressMessage("ReSharper", "InconsistentNaming")] + [SuppressMessage("ReSharper", "MemberCanBePrivate.Global")] + [SuppressMessage("ReSharper", "MemberCanBeMadeStatic.Global")] + public class MultiArrayConstructor : IObjectConstructor + { + public object construct(object[] args) + { + //Console.WriteLine(args.Length); + //for (int i = 0; i < args.Length; i++) + //{ + // Console.WriteLine(args[i]); + //} + Console.WriteLine("MultiArrayConstructor"); + + var arg1 = (Object[])args[1]; + var dims = new int[arg1.Length]; + for (var i = 0; i < arg1.Length; i++) + { + dims[i] = (int)arg1[i]; + } + + var dtype = TF_DataType.DtInvalid; + switch (args[2]) + { + case "b": dtype = TF_DataType.DtUint8Ref; break; + default: throw new NotImplementedException("cannot parse" + args[2]); + } + return new NDArray(new Shape(dims), dtype); + + } + } +} diff --git a/src/TensorFlowNET.Core/NumPy/NDArray.Pickle.cs b/src/TensorFlowNET.Core/NumPy/NDArray.Pickle.cs new file mode 100644 index 000000000..b4d66243a --- /dev/null +++ b/src/TensorFlowNET.Core/NumPy/NDArray.Pickle.cs @@ -0,0 +1,19 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.NumPy +{ + public partial class NDArray + { + public void __setstate__(object[] args) + { + Console.WriteLine("NDArray __setstate__"); + Console.WriteLine(args.Length); + for (int i = 0; i < args.Length; i++) + { + Console.WriteLine(args[i]); + } + } + } +} diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj index 09f5b0770..38778c3fe 100644 --- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj +++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj @@ -112,6 +112,7 @@ https://tensorflownet.readthedocs.io + diff --git a/src/TensorFlowNET.Keras/Datasets/Imdb.cs b/src/TensorFlowNET.Keras/Datasets/Imdb.cs index 56b0d2a77..016b352d9 100644 --- a/src/TensorFlowNET.Keras/Datasets/Imdb.cs +++ b/src/TensorFlowNET.Keras/Datasets/Imdb.cs @@ -5,6 +5,13 @@ using Tensorflow.Keras.Utils; using Tensorflow.NumPy; using System.Linq; +using Google.Protobuf.Collections; +using Microsoft.VisualBasic; +using OneOf.Types; +using static HDF.PInvoke.H5; +using System.Data; +using System.Reflection.Emit; +using System.Xml.Linq; namespace Tensorflow.Keras.Datasets { @@ -12,13 +19,59 @@ namespace Tensorflow.Keras.Datasets /// This is a dataset of 25,000 movies reviews from IMDB, labeled by sentiment /// (positive/negative). Reviews have been preprocessed, and each review is /// encoded as a list of word indexes(integers). + /// For convenience, words are indexed by overall frequency in the dataset, + /// so that for instance the integer "3" encodes the 3rd most frequent word in + /// the data.This allows for quick filtering operations such as: + /// "only consider the top 10,000 most + /// common words, but eliminate the top 20 most common words". + /// As a convention, "0" does not stand for a specific word, but instead is used + /// to encode the pad token. + /// Args: + /// path: where to cache the data (relative to %TEMP%/imdb/imdb.npz). + /// num_words: integer or None.Words are + /// ranked by how often they occur(in the training set) and only + /// the `num_words` most frequent words are kept.Any less frequent word + /// will appear as `oov_char` value in the sequence data.If None, + /// all words are kept.Defaults to `None`. + /// skip_top: skip the top N most frequently occurring words + /// (which may not be informative). 
These words will appear as + /// `oov_char` value in the dataset.When 0, no words are + /// skipped. Defaults to `0`. + /// maxlen: int or None.Maximum sequence length. + /// Any longer sequence will be truncated. None, means no truncation. + /// Defaults to `None`. + /// seed: int. Seed for reproducible data shuffling. + /// start_char: int. The start of a sequence will be marked with this + /// character. 0 is usually the padding character. Defaults to `1`. + /// oov_char: int. The out-of-vocabulary character. + /// Words that were cut out because of the `num_words` or + /// `skip_top` limits will be replaced with this character. + /// index_from: int. Index actual words with this index and higher. + /// Returns: + /// Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. + /// + /// ** x_train, x_test**: lists of sequences, which are lists of indexes + /// (integers). If the num_words argument was specific, the maximum + /// possible index value is `num_words - 1`. If the `maxlen` argument was + /// specified, the largest possible sequence length is `maxlen`. + /// + /// ** y_train, y_test**: lists of integer labels(1 or 0). + /// + /// Raises: + /// ValueError: in case `maxlen` is so low + /// that no input sequence could be kept. + /// Note that the 'out of vocabulary' character is only used for + /// words that were present in the training set but are not included + /// because they're not making the `num_words` cut here. + /// Words that were not seen in the training set but are in the test set + /// have simply been skipped. /// + /// """Loads the [IMDB dataset](https://ai.stanford.edu/~amaas/data/sentiment/). public class Imdb { string origin_folder = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"; string file_name = "imdb.npz"; string dest_folder = "imdb"; - /// /// Loads the [IMDB dataset](https://ai.stanford.edu/~amaas/data/sentiment/). 
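/// A minimal usage sketch of the loader described above (it mirrors the GetData test
/// added later in this patch; the literal 20000 is an illustrative vocabulary size):
///   var dataset = keras.datasets.imdb.load_data(num_words: 20000);
///   var (x_train, y_train) = (dataset.Train.Item1, dataset.Train.Item2);
///   var (x_test, y_test) = (dataset.Test.Item1, dataset.Test.Item2);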
/// @@ -41,8 +94,10 @@ public DatasetPass load_data(string path = "imdb.npz", int index_from = 3) { var dst = Download(); - - var lines = File.ReadAllLines(Path.Combine(dst, "imdb_train.txt")); + var fileBytes = File.ReadAllBytes(Path.Combine(dst, file_name)); + var (x_train, x_test) = LoadX(fileBytes); + var (y_train, y_test) = LoadY(fileBytes); + /*var lines = File.ReadAllLines(Path.Combine(dst, "imdb_train.txt")); var x_train_string = new string[lines.Length]; var y_train = np.zeros(new int[] { lines.Length }, np.int64); for (int i = 0; i < lines.Length; i++) @@ -62,7 +117,7 @@ public DatasetPass load_data(string path = "imdb.npz", x_test_string[i] = lines[i].Substring(2); } - var x_test = np.array(x_test_string); + var x_test = np.array(x_test_string);*/ return new DatasetPass { diff --git a/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs b/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs index 8317346ea..778290bb8 100644 --- a/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs +++ b/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs @@ -1,7 +1,9 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; using System; +using System.Collections.Generic; using System.Linq; using static Tensorflow.Binding; +using static Tensorflow.KerasApi; namespace TensorFlowNET.UnitTest.Dataset { @@ -195,5 +197,20 @@ public void Shuffle() Assert.IsFalse(allEqual); } + [TestMethod] + public void GetData() + { + var vocab_size = 20000; // Only consider the top 20k words + var maxlen = 200; // Only consider the first 200 words of each movie review + var dataset = keras.datasets.imdb.load_data(num_words: vocab_size); + var x_train = dataset.Train.Item1; + var y_train = dataset.Train.Item2; + var x_val = dataset.Test.Item1; + var y_val = dataset.Test.Item2; + print(len(x_train) + "Training sequences"); + print(len(x_val) + "Validation sequences"); + x_train = keras.preprocessing.sequence.pad_sequences((IEnumerable)x_train, maxlen: maxlen); + x_val = keras.preprocessing.sequence.pad_sequences((IEnumerable)x_val, maxlen: maxlen); + } } } From fcd10447abb20e50ed2d67e313c2f75566319649 Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Fri, 23 Jun 2023 13:39:36 +0800 Subject: [PATCH 059/182] add more type case for tensor.zeros --- src/TensorFlowNET.Core/Operations/array_ops.cs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index a0b47aace..24c392155 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -84,8 +84,13 @@ public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT // var shape_tensor = constant_op._tensor_shape_tensor_conversion_function(shape); Tensor zeros = dtype switch { + TF_DataType.TF_BOOL => constant(false), TF_DataType.TF_DOUBLE => constant(0d), TF_DataType.TF_FLOAT => constant(0f), + TF_DataType.TF_INT64 => constant(0L), + TF_DataType.TF_UINT64 => constant((ulong)0), + TF_DataType.TF_INT32 => constant(0), + TF_DataType.TF_UINT32 => constant((uint)0), TF_DataType.TF_INT8 => constant((sbyte)0), TF_DataType.TF_UINT8 => constant((byte)0), _ => constant(0) @@ -108,9 +113,15 @@ public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT return _constant_if_small(0.0F, shape, dtype, name); case TF_DataType.TF_INT64: return _constant_if_small(0L, shape, dtype, name); + case TF_DataType.TF_UINT64: + return _constant_if_small(0, shape, dtype, name); case TF_DataType.TF_INT32: return 
_constant_if_small(0, shape, dtype, name); + case TF_DataType.TF_UINT32: + return _constant_if_small(0, shape, dtype, name); case TF_DataType.TF_INT8: + return _constant_if_small(0, shape, dtype, name); + case TF_DataType.TF_UINT8: return _constant_if_small(0, shape, dtype, name); default: throw new TypeError("can't find type for zeros"); From e749aaeaae197464f817e1c7bfffe6f922d55b6a Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Fri, 23 Jun 2023 14:04:44 +0800 Subject: [PATCH 060/182] add more implicit operator for NDArray and UnitTest for `keras.datasets.imdb` --- src/TensorFlowNET.Core/NumPy/NDArray.Implicit.cs | 6 ++++++ .../TensorFlowNET.UnitTest/Dataset/DatasetTest.cs | 15 +++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/src/TensorFlowNET.Core/NumPy/NDArray.Implicit.cs b/src/TensorFlowNET.Core/NumPy/NDArray.Implicit.cs index fd4f93fc1..45b236c7b 100644 --- a/src/TensorFlowNET.Core/NumPy/NDArray.Implicit.cs +++ b/src/TensorFlowNET.Core/NumPy/NDArray.Implicit.cs @@ -107,9 +107,15 @@ public unsafe static implicit operator double(NDArray nd) public static implicit operator NDArray(bool value) => new NDArray(value); + public static implicit operator NDArray(byte value) + => new NDArray(value); + public static implicit operator NDArray(int value) => new NDArray(value); + public static implicit operator NDArray(long value) + => new NDArray(value); + public static implicit operator NDArray(float value) => new NDArray(value); diff --git a/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs b/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs index 8317346ea..875e50019 100644 --- a/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs +++ b/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs @@ -2,6 +2,7 @@ using System; using System.Linq; using static Tensorflow.Binding; +using static Tensorflow.KerasApi; namespace TensorFlowNET.UnitTest.Dataset { @@ -195,5 +196,19 @@ public void Shuffle() Assert.IsFalse(allEqual); } + [TestMethod] + public void GetData() + { + var vocab_size = 20000; + var dataset = keras.datasets.imdb.load_data(num_words: vocab_size); + var x_train = dataset.Train.Item1; + Assert.AreEqual(x_train.dims[0], 25000); + var y_train = dataset.Train.Item2; + Assert.AreEqual(y_train.dims[0], 25000); + var x_val = dataset.Test.Item1; + Assert.AreEqual(x_val.dims[0], 25000); + var y_val = dataset.Test.Item2; + Assert.AreEqual(y_val.dims[0], 25000); + } } } From c23b24633fa1111d613deeedba5c9869ea463dd8 Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Fri, 23 Jun 2023 14:21:27 +0800 Subject: [PATCH 061/182] remove UnitTest for `keras.datasets.imdb` --- .../TensorFlowNET.UnitTest/Dataset/DatasetTest.cs | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs b/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs index 875e50019..8317346ea 100644 --- a/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs +++ b/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs @@ -2,7 +2,6 @@ using System; using System.Linq; using static Tensorflow.Binding; -using static Tensorflow.KerasApi; namespace TensorFlowNET.UnitTest.Dataset { @@ -196,19 +195,5 @@ public void Shuffle() Assert.IsFalse(allEqual); } - [TestMethod] - public void GetData() - { - var vocab_size = 20000; - var dataset = keras.datasets.imdb.load_data(num_words: vocab_size); - var x_train = dataset.Train.Item1; - Assert.AreEqual(x_train.dims[0], 25000); - var y_train = dataset.Train.Item2; - Assert.AreEqual(y_train.dims[0], 25000); - var x_val = dataset.Test.Item1; - 
Assert.AreEqual(x_val.dims[0], 25000); - var y_val = dataset.Test.Item2; - Assert.AreEqual(y_val.dims[0], 25000); - } } } From bfa9f77f42a361b4a31b644454d6338182c81e93 Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Sat, 24 Jun 2023 08:55:40 -0500 Subject: [PATCH 062/182] tf.math.sqrt --- src/TensorFlowNET.Core/APIs/tf.math.cs | 2 +- src/TensorFlowNET.Core/Operations/math_ops.cs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs index 0e53d938a..ffbc43738 100644 --- a/src/TensorFlowNET.Core/APIs/tf.math.cs +++ b/src/TensorFlowNET.Core/APIs/tf.math.cs @@ -354,7 +354,7 @@ public Tensor divide(Tensor a, Tensor b) => a / b; public Tensor sqrt(Tensor a, string name = null) - => gen_math_ops.sqrt(a, name); + => math_ops.sqrt(a, name); public Tensor sign(Tensor a, string name = null) => gen_math_ops.sign(a, name); diff --git a/src/TensorFlowNET.Core/Operations/math_ops.cs b/src/TensorFlowNET.Core/Operations/math_ops.cs index 5ded448ac..d00a5d367 100644 --- a/src/TensorFlowNET.Core/Operations/math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/math_ops.cs @@ -269,7 +269,7 @@ public static Tensor erf(Tensor x, string name = null) => tf.Context.ExecuteOp("Erf", name, new ExecuteOpArgs(x)); public static Tensor sqrt(Tensor x, string name = null) - => gen_math_ops.sqrt(x, name: name); + => tf.Context.ExecuteOp("Sqrt", name, new ExecuteOpArgs(x)); public static Tensor multiply(Tensor x, Tensor y, string name = null) => tf.Context.ExecuteOp("Mul", name, new ExecuteOpArgs(x, y)); From eeb20e4fe620161a2e65ce63e72cd39cd9086548 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Mon, 26 Jun 2023 16:20:45 +0800 Subject: [PATCH 063/182] Add new feature: Add UpSampling1D layer and test. 
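A minimal usage sketch of the new layer, mirroring the unit test added below (the
input values are illustrative): each temporal step is repeated `size` times along
axis 1, so a (2, 2, 3) input becomes (2, 4, 3) with size: 2.

    Shape input_shape = (2, 2, 3);
    var x = np.arange(input_shape.size).reshape(input_shape);
    var y = tf.keras.layers.UpSampling1D(size: 2).Apply(x);   // y.shape == (2, 4, 3)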
--- .../Reshaping/UpSampling2DArgs.cs | 2 +- .../Reshaping/Upsampling1DArgs.cs | 10 +++ .../Keras/Layers/ILayersApi.Reshaping.cs | 4 ++ src/TensorFlowNET.Keras/BackendImpl.cs | 26 ++++++++ .../Layers/LayersApi.Reshaping.cs | 61 +++++++++++-------- .../Layers/Reshaping/UpSampling1D.cs | 32 ++++++++++ .../Layers/Reshaping/UpSampling2D.cs | 3 + .../Layers/Layers.Reshaping.Test.cs | 10 +++ 8 files changed, 123 insertions(+), 25 deletions(-) create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/Upsampling1DArgs.cs create mode 100644 src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling1D.cs diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/UpSampling2DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/UpSampling2DArgs.cs index b35e0e4b6..504b3d46d 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/UpSampling2DArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/UpSampling2DArgs.cs @@ -7,7 +7,7 @@ public class UpSampling2DArgs : AutoSerializeLayerArgs [JsonProperty("size")] public Shape Size { get; set; } [JsonProperty("data_format")] - public string DataFormat { get; set; } + public string DataFormat { get; set; } = "channels_last"; /// /// 'nearest', 'bilinear' /// diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/Upsampling1DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/Upsampling1DArgs.cs new file mode 100644 index 000000000..4e3dbf17a --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/Upsampling1DArgs.cs @@ -0,0 +1,10 @@ +using Newtonsoft.Json; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class UpSampling1DArgs : AutoSerializeLayerArgs + { + [JsonProperty("size")] + public int Size { get; set; } + } +} diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.Reshaping.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.Reshaping.cs index d41e06887..ae34c514f 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.Reshaping.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.Reshaping.cs @@ -9,6 +9,10 @@ public partial interface ILayersApi public ILayer Reshape(Shape target_shape); public ILayer Reshape(object[] target_shape); + public ILayer UpSampling1D( + int size + ); + public ILayer UpSampling2D(Shape size = null, string data_format = null, string interpolation = "nearest"); diff --git a/src/TensorFlowNET.Keras/BackendImpl.cs b/src/TensorFlowNET.Keras/BackendImpl.cs index 8dbcf90d5..364800ae5 100644 --- a/src/TensorFlowNET.Keras/BackendImpl.cs +++ b/src/TensorFlowNET.Keras/BackendImpl.cs @@ -956,6 +956,32 @@ Tensors _step(Tensors tensors) } + /// + /// Repeats the elements of a tensor along an axis, like `np.repeat`. 
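/// For example (a sketch of the intended behaviour, assuming a 2-D input): for `x` of
/// shape (2, 3), `repeat_elements(x, rep: 2, axis: 1)` returns a tensor of shape (2, 6)
/// in which each column appears twice, matching `np.repeat(x, 2, axis=1)`.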
+ /// + /// + /// + /// + /// + public Tensor repeat_elements(Tensor x, int rep, int axis) + { + var x_shape = x.shape.as_int_list(); + if (x_shape[axis] != -1) + { + var splits = tf.split(x, x_shape[axis], axis:axis); + var x_rep = splits.SelectMany(s => Enumerable.Repeat(s, rep)).ToArray(); + return concatenate(x_rep, axis); + } + //var auxiliary_axis = axis + 1; + //x_shape = x.shape; + //var x_rep = tf.expand_dims(x, auxiliary_axis); + //var reps = np.ones(x_shape.Length + 1); + //reps[auxiliary_axis] = rep; + //x_rep = tf.tile(x_rep, reps); + + throw new NotImplementedException(); + + } public Tensor reverse(Tensor input, int axis) { return reverse(input, new int[] { axis }); diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.Reshaping.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.Reshaping.cs index d3db1d663..2ee99bc79 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.Reshaping.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.Reshaping.cs @@ -6,35 +6,48 @@ namespace Tensorflow.Keras.Layers { public partial class LayersApi { - /// - /// Zero-padding layer for 2D input (e.g. picture). - /// - /// - /// - public ILayer ZeroPadding2D ( NDArray padding ) + + /// + /// Upsampling layer for 1D inputs. Repeats each temporal step `size` times along the time axis. + /// + /// + /// + public ILayer UpSampling1D(int size) + => new UpSampling1D(new UpSampling1DArgs + { + Size = size + }); + + /// + /// Zero-padding layer for 2D input (e.g. picture). + /// + /// + /// + public ILayer ZeroPadding2D ( NDArray padding ) => new ZeroPadding2D(new ZeroPadding2DArgs { Padding = padding }); - /// - /// Upsampling layer for 2D inputs.
- /// Repeats the rows and columns of the data by size[0] and size[1] respectively. - ///
- /// - /// - /// - /// - public ILayer UpSampling2D ( Shape size = null, - string data_format = null, - string interpolation = "nearest" ) - => new UpSampling2D(new UpSampling2DArgs { - Size = size ?? (2, 2) - }); + /// + /// Upsampling layer for 2D inputs.
+ /// Repeats the rows and columns of the data by size[0] and size[1] respectively. + ///
+ /// + /// + /// + /// + public ILayer UpSampling2D(Shape size, string data_format, string interpolation) + => new UpSampling2D(new UpSampling2DArgs + { + Size = size, + DataFormat = data_format, + Interpolation = interpolation + }); - /// - /// Permutes the dimensions of the input according to a given pattern. - /// - public ILayer Permute ( int[] dims ) + /// + /// Permutes the dimensions of the input according to a given pattern. + /// + public ILayer Permute ( int[] dims ) => new Permute(new PermuteArgs { dims = dims }); diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling1D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling1D.cs new file mode 100644 index 000000000..3bc8d6c6b --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling1D.cs @@ -0,0 +1,32 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Common.Types; +using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Keras.Engine; + + +namespace Tensorflow.Keras.Layers +{ + /// + /// Upsampling layer for 1D inputs. + /// + public class UpSampling1D : Layer + { + UpSampling1DArgs args; + int size; + + public UpSampling1D(UpSampling1DArgs args) : base(args) + { + this.args = args; + size = args.Size; + inputSpec = new InputSpec(ndim: 3); + } + + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) + { + var output = keras.backend.repeat_elements(inputs, size, axis: 1); + return output; + } + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs index 223f33d4f..cb579d61e 100644 --- a/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs @@ -10,6 +10,9 @@ namespace Tensorflow.Keras.Layers { + /// + /// Upsampling layer for 2D inputs. + /// public class UpSampling2D : Layer { UpSampling2DArgs args; diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Layers.Reshaping.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Layers.Reshaping.Test.cs index 748544cb0..5b16cc908 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Layers.Reshaping.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Layers.Reshaping.Test.cs @@ -1,4 +1,5 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; using Tensorflow.NumPy; using static Tensorflow.Binding; using static Tensorflow.KerasApi; @@ -18,6 +19,15 @@ public void ZeroPadding2D() Assert.AreEqual((1, 2, 3, 2), y.shape); } + [TestMethod] + public void UpSampling1D() + { + Shape input_shape = (2, 2, 3); + var x = np.arange(input_shape.size).reshape(input_shape); + var y = tf.keras.layers.UpSampling1D(size: 2).Apply(x); + Assert.AreEqual((2, 4, 3), y.shape); + } + [TestMethod] public void UpSampling2D() { From 61c927ac170fe0294191d4f9af876a4d00f41052 Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Mon, 26 Jun 2023 09:24:45 -0500 Subject: [PATCH 064/182] Release v0.110.0. 
--- src/TensorFlowNET.Core/Tensorflow.Binding.csproj | 6 +++++- src/TensorFlowNET.Keras/Tensorflow.Keras.csproj | 2 +- .../TensorFlowNET.Graph.UnitTest.csproj | 6 +++--- .../Tensorflow.Keras.UnitTest.csproj | 6 +++--- .../Tensorflow.Native.UnitTest.csproj | 6 +++--- .../Tensorflow.Binding.UnitTest.csproj | 6 +++--- tools/TensorFlowNET.Benchmarks/Tensorflow.Benchmark.csproj | 2 +- tools/TensorFlowNET.Console/Tensorflow.Console.csproj | 2 +- 8 files changed, 20 insertions(+), 16 deletions(-) diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj index 61b86168e..3bc20289a 100644 --- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj +++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj @@ -10,7 +10,7 @@ enable Haiping Chen, Meinrad Recheis, Eli Belash SciSharp STACK - true + False Apache 2.0, Haiping Chen $([System.DateTime]::UtcNow.ToString(yyyy)) https://github.com/SciSharp/TensorFlow.NET git @@ -22,6 +22,9 @@ Building, training and infering deep learning models. https://tensorflownet.readthedocs.io 0.110.0.0 + tf.net 0.110.x and above are based on tensorflow native 2.11.0 + * RNN, LSTM works. + tf.net 0.100.x and above are based on tensorflow native 2.10.0 * Eager Mode is added finally. @@ -37,6 +40,7 @@ https://tensorflownet.readthedocs.io tf.net 0.6x.x aligns with TensorFlow v2.6.x native library. tf.net 0.7x.x aligns with TensorFlow v2.7.x native library. tf.net 0.10x.x aligns with TensorFlow v2.10.x native library. + tf.net 0.11x.x aligns with TensorFlow v2.11.x native library. 0.110.0.0 LICENSE diff --git a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj index 320c3b679..5dc46fe49 100644 --- a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj +++ b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj @@ -31,7 +31,7 @@ Keras is an API designed for human beings, not machines. Keras follows best practices for reducing cognitive load: it offers consistent & simple APIs, it minimizes the number of user actions required for common use cases, and it provides clear & actionable error messages. 
SciSharp STACK - true + False tensorflow, keras, deep learning, machine learning true packages diff --git a/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj b/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj index c353832ad..78a0938c5 100644 --- a/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj +++ b/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj @@ -24,9 +24,9 @@ - - - + + + all runtime; build; native; contentfiles; analyzers; buildtransitive diff --git a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj index d744c3364..58c176e82 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj +++ b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj @@ -13,9 +13,9 @@ - - - + + + all runtime; build; native; contentfiles; analyzers; buildtransitive diff --git a/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj b/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj index 9fec0e6d5..a4f1ec567 100644 --- a/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj +++ b/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj @@ -44,9 +44,9 @@ - - - + + + all runtime; build; native; contentfiles; analyzers; buildtransitive diff --git a/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj b/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj index 98dadf012..240960c91 100644 --- a/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj +++ b/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj @@ -42,9 +42,9 @@ - - - + + + diff --git a/tools/TensorFlowNET.Benchmarks/Tensorflow.Benchmark.csproj b/tools/TensorFlowNET.Benchmarks/Tensorflow.Benchmark.csproj index f2495d224..dd6f9538b 100644 --- a/tools/TensorFlowNET.Benchmarks/Tensorflow.Benchmark.csproj +++ b/tools/TensorFlowNET.Benchmarks/Tensorflow.Benchmark.csproj @@ -37,7 +37,7 @@ - + diff --git a/tools/TensorFlowNET.Console/Tensorflow.Console.csproj b/tools/TensorFlowNET.Console/Tensorflow.Console.csproj index c79d4845c..ecc2d30b5 100644 --- a/tools/TensorFlowNET.Console/Tensorflow.Console.csproj +++ b/tools/TensorFlowNET.Console/Tensorflow.Console.csproj @@ -20,7 +20,7 @@ - + From fff5029b0240c30ee4f2b9329c71c8665e091858 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Tue, 27 Jun 2023 23:57:48 +0800 Subject: [PATCH 065/182] fix: revise earlystopping callback's min_delta parameter --- src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs b/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs index 73ccc87b0..59152d9b2 100644 --- a/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs +++ b/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs @@ -11,7 +11,7 @@ namespace Tensorflow.Keras.Callbacks; public class EarlyStopping: ICallback { int _paitence; - int _min_delta; + float _min_delta; int _verbose; int _stopped_epoch; int _wait; @@ -26,7 +26,7 @@ public class EarlyStopping: ICallback CallbackParams _parameters; public Dictionary>? 
history { get; set; } // user need to pass a CallbackParams to EarlyStopping, CallbackParams at least need the model - public EarlyStopping(CallbackParams parameters,string monitor = "val_loss", int min_delta = 0, int patience = 0, + public EarlyStopping(CallbackParams parameters,string monitor = "val_loss", float min_delta = 0f, int patience = 0, int verbose = 1, string mode = "auto", float baseline = 0f, bool restore_best_weights = false, int start_from_epoch = 0) { From 81b10e37809d5ae57989f55d4102a0a367d4322c Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Wed, 28 Jun 2023 02:19:28 +0800 Subject: [PATCH 066/182] feat: Add GRUCell layer --- src/TensorFlowNET.Core/APIs/tf.tensor.cs | 13 +- .../Keras/ArgsDefinition/Rnn/GRUCellArgs.cs | 39 +++ .../Keras/Layers/ILayersApi.cs | 12 + src/TensorFlowNET.Keras/Layers/LayersApi.cs | 43 +++ src/TensorFlowNET.Keras/Layers/Rnn/GRUCell.cs | 282 ++++++++++++++++++ .../Layers/Rnn.Test.cs | 13 + 6 files changed, 399 insertions(+), 3 deletions(-) create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUCellArgs.cs create mode 100644 src/TensorFlowNET.Keras/Layers/Rnn/GRUCell.cs diff --git a/src/TensorFlowNET.Core/APIs/tf.tensor.cs b/src/TensorFlowNET.Core/APIs/tf.tensor.cs index 45aebc0cd..b03168ab3 100644 --- a/src/TensorFlowNET.Core/APIs/tf.tensor.cs +++ b/src/TensorFlowNET.Core/APIs/tf.tensor.cs @@ -68,20 +68,27 @@ public Tensor strided_slice(Tensor input, T[] begin, T[] end, T[] strides = n /// A name for the operation (optional) /// if num_or_size_splits is a scalar returns num_or_size_splits Tensor objects; /// if num_or_size_splits is a 1-D Tensor returns num_or_size_splits.get_shape[0] Tensor objects resulting from splitting value. - public Tensor[] split(Tensor value, int num_split, Tensor axis, string name = null) + public Tensor[] split(Tensor value, int num_split, Axis axis, string name = null) => array_ops.split( value: value, num_or_size_splits: num_split, axis: axis, name: name); - public Tensor[] split(Tensor value, int num_split, int axis, string name = null) + public Tensor[] split(Tensor value, int[] num_split, Axis axis, string name = null) => array_ops.split( value: value, num_or_size_splits: num_split, - axis: ops.convert_to_tensor(axis), + axis: axis, name: name); + //public Tensor[] split(Tensor value, int num_split, Axis axis, string name = null) + // => array_ops.split( + // value: value, + // num_or_size_splits: num_split, + // axis: axis, + // name: name); + public Tensor ensure_shape(Tensor x, Shape shape, string name = null) { return gen_ops.ensure_shape(x, shape, name); diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUCellArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUCellArgs.cs new file mode 100644 index 000000000..75d5d0218 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUCellArgs.cs @@ -0,0 +1,39 @@ +using Newtonsoft.Json; +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition.Rnn +{ + public class GRUCellArgs : AutoSerializeLayerArgs + { + [JsonProperty("units")] + public int Units { get; set; } + // TODO(Rinne): lack of initialized value of Activation. Merging keras + // into tf.net could resolve it. 
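// A sketch of the standard GRU update that these arguments parameterize
// (RecurrentActivation = sigma, Activation = phi; biases omitted for brevity):
//   z_t  = sigma(W_z x_t + U_z h_{t-1})           update gate
//   r_t  = sigma(W_r x_t + U_r h_{t-1})           reset gate
//   hh_t = phi(W_h x_t + U_h (r_t * h_{t-1}))     candidate state
//   h_t  = z_t * h_{t-1} + (1 - z_t) * hh_t
// When ResetAfter is true the reset gate is applied after the recurrent matmul
// (the cuDNN-compatible variant), matching the GRUCell implementation added below.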
+ [JsonProperty("activation")] + public Activation Activation { get; set; } + [JsonProperty("recurrent_activation")] + public Activation RecurrentActivation { get; set; } + [JsonProperty("use_bias")] + public bool UseBias { get; set; } = true; + [JsonProperty("dropout")] + public float Dropout { get; set; } = .0f; + [JsonProperty("recurrent_dropout")] + public float RecurrentDropout { get; set; } = .0f; + [JsonProperty("kernel_initializer")] + public IInitializer KernelInitializer { get; set; } + [JsonProperty("recurrent_initializer")] + public IInitializer RecurrentInitializer { get; set; } + [JsonProperty("bias_initializer")] + public IInitializer BiasInitializer { get; set; } + [JsonProperty("reset_after")] + public bool ResetAfter { get;set; } + [JsonProperty("implementation")] + public int Implementation { get; set; } = 2; + + + + } + +} diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs index a19508d42..9bc99701d 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs @@ -246,6 +246,18 @@ public ILayer RNN( bool time_major = false ); + public IRnnCell GRUCell( + int units, + string activation = "tanh", + string recurrent_activation = "sigmoid", + bool use_bias = true, + string kernel_initializer = "glorot_uniform", + string recurrent_initializer = "orthogonal", + string bias_initializer = "zeros", + float dropout = 0f, + float recurrent_dropout = 0f, + bool reset_after = true); + public ILayer Subtract(); } } diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index 0bdcbc841..d20803375 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -873,6 +873,45 @@ public ILayer LSTM(int units, UnitForgetBias = unit_forget_bias }); + /// + /// Cell class for the GRU layer. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public IRnnCell GRUCell( + int units, + string activation = "tanh", + string recurrent_activation = "sigmoid", + bool use_bias = true, + string kernel_initializer = "glorot_uniform", + string recurrent_initializer = "orthogonal", + string bias_initializer = "zeros", + float dropout = 0f, + float recurrent_dropout = 0f, + bool reset_after = true) + => new GRUCell(new GRUCellArgs + { + Units = units, + Activation = keras.activations.GetActivationFromName(activation), + RecurrentActivation = keras.activations.GetActivationFromName(recurrent_activation), + KernelInitializer = GetInitializerByName(kernel_initializer), + RecurrentInitializer = GetInitializerByName(recurrent_initializer), + BiasInitializer = GetInitializerByName(bias_initializer), + UseBias = use_bias, + Dropout = dropout, + RecurrentDropout = recurrent_dropout, + ResetAfter = reset_after + }); + /// /// /// @@ -983,5 +1022,9 @@ public ILayer Normalization(Shape? input_shape = null, int? axis = -1, float? 
me Variance = variance, Invert = invert }); + + + + } } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/GRUCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/GRUCell.cs new file mode 100644 index 000000000..02fe54f49 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Rnn/GRUCell.cs @@ -0,0 +1,282 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Keras.ArgsDefinition.Rnn; +using Tensorflow.Common.Extensions; +using Tensorflow.Common.Types; +using Tensorflow.Keras.Saving; + +namespace Tensorflow.Keras.Layers.Rnn +{ + /// + /// Cell class for the GRU layer. + /// + public class GRUCell : DropoutRNNCellMixin + { + GRUCellArgs _args; + IVariableV1 _kernel; + IVariableV1 _recurrent_kernel; + IInitializer _bias_initializer; + IVariableV1 _bias; + INestStructure _state_size; + INestStructure _output_size; + int Units; + public override INestStructure StateSize => _state_size; + + public override INestStructure OutputSize => _output_size; + + public override bool SupportOptionalArgs => false; + public GRUCell(GRUCellArgs args) : base(args) + { + _args = args; + if (_args.Units <= 0) + { + throw new ValueError( + $"units must be a positive integer, got {args.Units}"); + } + _args.Dropout = Math.Min(1f, Math.Max(0f, _args.Dropout)); + _args.RecurrentDropout = Math.Min(1f, Math.Max(0f, this._args.RecurrentDropout)); + if (_args.RecurrentDropout != 0f && _args.Implementation != 1) + { + Debug.WriteLine("RNN `implementation=2` is not supported when `recurrent_dropout` is set." + + "Using `implementation=1`."); + _args.Implementation = 1; + } + Units = _args.Units; + _state_size = new NestList(Units); + _output_size = new NestNode(Units); + } + + public override void build(KerasShapesWrapper input_shape) + { + //base.build(input_shape); + + var single_shape = input_shape.ToSingleShape(); + var input_dim = single_shape[-1]; + + _kernel = add_weight("kernel", (input_dim, _args.Units * 3), + initializer: _args.KernelInitializer + ); + + _recurrent_kernel = add_weight("recurrent_kernel", (Units, Units * 3), + initializer: _args.RecurrentInitializer + ); + if (_args.UseBias) + { + Shape bias_shape; + if (!_args.ResetAfter) + { + bias_shape = new Shape(3 * Units); + } + else + { + bias_shape = (2, 3 * Units); + } + _bias = add_weight("bias", bias_shape, + initializer: _bias_initializer + ); + } + built = true; + } + + protected override Tensors Call(Tensors inputs, Tensors states = null, bool? training = null, IOptionalArgs? optional_args = null) + { + var h_tm1 = states.IsNested() ? 
states[0] : states.Single(); + var dp_mask = get_dropout_mask_for_cell(inputs, training.Value, count: 3); + var rec_dp_mask = get_recurrent_dropout_mask_for_cell(h_tm1, training.Value, count: 3); + + IVariableV1 input_bias = _bias; + IVariableV1 recurrent_bias = _bias; + if (_args.UseBias) + { + if (!_args.ResetAfter) + { + input_bias = _bias; + recurrent_bias = null; + } + else + { + input_bias = tf.Variable(tf.unstack(_bias.AsTensor())[0]); + recurrent_bias = tf.Variable(tf.unstack(_bias.AsTensor())[1]); + } + } + + + Tensor hh; + Tensor z; + if ( _args.Implementation == 1) + { + Tensor inputs_z; + Tensor inputs_r; + Tensor inputs_h; + if (0f < _args.Dropout && _args.Dropout < 1f) + { + inputs_z = inputs * dp_mask[0]; + inputs_r = inputs * dp_mask[1]; + inputs_h = inputs * dp_mask[2]; + } + else + { + inputs_z = inputs.Single(); + inputs_r = inputs.Single(); + inputs_h = inputs.Single(); + } + + + int startIndex = (int)_kernel.AsTensor().shape[0]; + var _kernel_slice = tf.slice(_kernel.AsTensor(), + new[] { 0, 0 }, new[] { startIndex, Units }); + var x_z = math_ops.matmul(inputs_z, _kernel_slice); + _kernel_slice = tf.slice(_kernel.AsTensor(), + new[] { 0, Units }, new[] { Units, Units}); + var x_r = math_ops.matmul( + inputs_r, _kernel_slice); + int endIndex = (int)_kernel.AsTensor().shape[1]; + _kernel_slice = tf.slice(_kernel.AsTensor(), + new[] { 0, Units * 2 }, new[] { startIndex, endIndex - Units * 2 }); + var x_h = math_ops.matmul(inputs_h, _kernel_slice); + + if(_args.UseBias) + { + x_z = tf.nn.bias_add( + x_z, tf.Variable(input_bias.AsTensor()[$":{Units}"])); + x_r = tf.nn.bias_add( + x_r, tf.Variable(input_bias.AsTensor()[$"{Units}:{Units * 2}"])); + x_h = tf.nn.bias_add( + x_h, tf.Variable(input_bias.AsTensor()[$"{Units * 2}:"])); + } + + Tensor h_tm1_z; + Tensor h_tm1_r; + Tensor h_tm1_h; + if (0f < _args.RecurrentDropout && _args.RecurrentDropout < 1f) + { + h_tm1_z = h_tm1 * rec_dp_mask[0]; + h_tm1_r = h_tm1 * rec_dp_mask[1]; + h_tm1_h = h_tm1 * rec_dp_mask[2]; + } + else + { + h_tm1_z = h_tm1; + h_tm1_r = h_tm1; + h_tm1_h = h_tm1; + } + + startIndex = (int)_recurrent_kernel.AsTensor().shape[0]; + var _recurrent_kernel_slice = tf.slice(_recurrent_kernel.AsTensor(), + new[] { 0, 0 }, new[] { startIndex, Units }); + var recurrent_z = math_ops.matmul( + h_tm1_z, _recurrent_kernel_slice); + _recurrent_kernel_slice = tf.slice(_recurrent_kernel.AsTensor(), + new[] { 0, Units }, new[] { startIndex, Units}); + var recurrent_r = math_ops.matmul( + h_tm1_r, _recurrent_kernel_slice); + if(_args.ResetAfter && _args.UseBias) + { + recurrent_z = tf.nn.bias_add( + recurrent_z, tf.Variable(recurrent_bias.AsTensor()[$":{Units}"])); + recurrent_r = tf.nn.bias_add( + recurrent_r, tf.Variable(recurrent_bias.AsTensor()[$"{Units}: {Units * 2}"])); + } + z = _args.RecurrentActivation.Apply(x_z + recurrent_z); + var r = _args.RecurrentActivation.Apply(x_r + recurrent_r); + + Tensor recurrent_h; + if (_args.ResetAfter) + { + endIndex = (int)_recurrent_kernel.AsTensor().shape[1]; + _recurrent_kernel_slice = tf.slice(_recurrent_kernel.AsTensor(), + new[] { 0, Units * 2 }, new[] { startIndex, endIndex - Units * 2 }); + recurrent_h = math_ops.matmul( + h_tm1_h, _recurrent_kernel_slice); + if(_args.UseBias) + { + recurrent_h = tf.nn.bias_add( + recurrent_h, tf.Variable(recurrent_bias.AsTensor()[$"{Units * 2}:"])); + } + recurrent_h *= r; + } + else + { + _recurrent_kernel_slice = tf.slice(_recurrent_kernel.AsTensor(), + new[] { 0, Units * 2 }, new[] { startIndex, endIndex - Units * 2 }); + recurrent_h = 
math_ops.matmul( + r * h_tm1_h, _recurrent_kernel_slice); + } + hh = _args.Activation.Apply(x_h + recurrent_h); + } + else + { + if (0f < _args.Dropout && _args.Dropout < 1f) + { + inputs = inputs * dp_mask[0]; + } + + var matrix_x = math_ops.matmul(inputs, _kernel.AsTensor()); + if(_args.UseBias) + { + matrix_x = tf.nn.bias_add(matrix_x, input_bias); + } + var matrix_x_spilted = tf.split(matrix_x, 3, axis: -1); + var x_z = matrix_x_spilted[0]; + var x_r = matrix_x_spilted[1]; + var x_h = matrix_x_spilted[2]; + + Tensor matrix_inner; + if (_args.ResetAfter) + { + matrix_inner = math_ops.matmul(h_tm1, _recurrent_kernel.AsTensor()); + if ( _args.UseBias) + { + matrix_inner = tf.nn.bias_add( + matrix_inner, recurrent_bias); + } + } + else + { + var startIndex = (int)_recurrent_kernel.AsTensor().shape[0]; + var _recurrent_kernel_slice = tf.slice(_recurrent_kernel.AsTensor(), + new[] { 0, 0 }, new[] { startIndex, Units * 2 }); + matrix_inner = math_ops.matmul( + h_tm1, _recurrent_kernel_slice); + } + + var matrix_inner_splitted = tf.split(matrix_inner, new int[] {Units, Units, -1}, axis:-1); + var recurrent_z = matrix_inner_splitted[0]; + var recurrent_r = matrix_inner_splitted[0]; + var recurrent_h = matrix_inner_splitted[0]; + + z = _args.RecurrentActivation.Apply(x_z + recurrent_z); + var r = _args.RecurrentActivation.Apply(x_r + recurrent_r); + + if(_args.ResetAfter) + { + recurrent_h = r * recurrent_h; + } + else + { + var startIndex = (int)_recurrent_kernel.AsTensor().shape[0]; + var endIndex = (int)_recurrent_kernel.AsTensor().shape[1]; + var _recurrent_kernel_slice = tf.slice(_recurrent_kernel.AsTensor(), + new[] { 0, 2*Units }, new[] { startIndex, endIndex - 2 * Units }); + recurrent_h = math_ops.matmul( + r * h_tm1, _recurrent_kernel_slice); + } + hh = _args.Activation.Apply(x_h + recurrent_h); + } + var h = z * h_tm1 + (1 - z) * hh; + if (states.IsNested()) + { + var new_state = new NestList(h); + return new Nest(new INestStructure[] { new NestNode(h), new_state }).ToTensors(); + } + else + { + return new Nest(new INestStructure[] { new NestNode(h), new NestNode(h)}).ToTensors(); + } + + } + } +} diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs index 8eeee7a88..becdbcd60 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs @@ -132,5 +132,18 @@ public void RNNForLSTMCell() Console.WriteLine($"output: {output}"); Assert.AreEqual((5, 4), output.shape); } + + [TestMethod] + public void GRUCell() + { + var inputs = tf.random.normal((32, 10, 8)); + var rnn = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4)); + var output = rnn.Apply(inputs); + Assert.AreEqual((32, 4), output.shape); + rnn = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4, reset_after:false, use_bias:false)); + output = rnn.Apply(inputs); + Assert.AreEqual((32, 4), output.shape); + + } } } From 8ebe3e31e30acc5c9659146a908ccdacaf36df88 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Thu, 29 Jun 2023 22:22:21 +0800 Subject: [PATCH 067/182] fix: fix the bug of repeated progress bar in Model.fit() --- .../Keras/Engine/ICallback.cs | 3 + src/TensorFlowNET.Core/Keras/Engine/IModel.cs | 2 +- .../Callbacks/CallbackList.cs | 5 ++ .../Callbacks/Earlystopping.cs | 4 ++ src/TensorFlowNET.Keras/Callbacks/History.cs | 4 ++ .../Callbacks/ProgbarLogger.cs | 3 + .../Engine/Model.Evaluate.cs | 57 ++++++++----------- src/TensorFlowNET.Keras/Engine/Model.Fit.cs | 2 +- 8 files 
changed, 45 insertions(+), 35 deletions(-) diff --git a/src/TensorFlowNET.Core/Keras/Engine/ICallback.cs b/src/TensorFlowNET.Core/Keras/Engine/ICallback.cs index 096dbd2ef..e114ca97f 100644 --- a/src/TensorFlowNET.Core/Keras/Engine/ICallback.cs +++ b/src/TensorFlowNET.Core/Keras/Engine/ICallback.cs @@ -14,6 +14,9 @@ public interface ICallback void on_predict_batch_end(long end_step, Dictionary logs); void on_predict_end(); void on_test_begin(); + void on_test_end(Dictionary logs); void on_test_batch_begin(long step); void on_test_batch_end(long end_step, Dictionary logs); + + } diff --git a/src/TensorFlowNET.Core/Keras/Engine/IModel.cs b/src/TensorFlowNET.Core/Keras/Engine/IModel.cs index ddc72aeec..19f3df9ba 100644 --- a/src/TensorFlowNET.Core/Keras/Engine/IModel.cs +++ b/src/TensorFlowNET.Core/Keras/Engine/IModel.cs @@ -60,7 +60,7 @@ void load_weights(string filepath, bool skip_mismatch = false, object options = null); - Dictionary evaluate(Tensor x, Tensor y, + Dictionary evaluate(NDArray x, NDArray y, int batch_size = -1, int verbose = 1, int steps = -1, diff --git a/src/TensorFlowNET.Keras/Callbacks/CallbackList.cs b/src/TensorFlowNET.Keras/Callbacks/CallbackList.cs index 362f2280c..cb16aafa3 100644 --- a/src/TensorFlowNET.Keras/Callbacks/CallbackList.cs +++ b/src/TensorFlowNET.Keras/Callbacks/CallbackList.cs @@ -73,4 +73,9 @@ public void on_test_batch_end(long end_step, Dictionary logs) { callbacks.ForEach(x => x.on_test_batch_end(end_step, logs)); } + + public void on_test_end(Dictionary logs) + { + callbacks.ForEach(x => x.on_test_end(logs)); + } } diff --git a/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs b/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs index 59152d9b2..b3b78423c 100644 --- a/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs +++ b/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs @@ -150,4 +150,8 @@ public bool _is_improvement(float monitor_value, float reference_value) return less_op; } } + + public void on_test_end(Dictionary logs) + { + } } diff --git a/src/TensorFlowNET.Keras/Callbacks/History.cs b/src/TensorFlowNET.Keras/Callbacks/History.cs index c34f253d1..6d3ff6c38 100644 --- a/src/TensorFlowNET.Keras/Callbacks/History.cs +++ b/src/TensorFlowNET.Keras/Callbacks/History.cs @@ -81,4 +81,8 @@ public void on_test_batch_begin(long step) public void on_test_batch_end(long end_step, Dictionary logs) { } + + public void on_test_end(Dictionary logs) + { + } } diff --git a/src/TensorFlowNET.Keras/Callbacks/ProgbarLogger.cs b/src/TensorFlowNET.Keras/Callbacks/ProgbarLogger.cs index 9f2b1eb31..23b18cd47 100644 --- a/src/TensorFlowNET.Keras/Callbacks/ProgbarLogger.cs +++ b/src/TensorFlowNET.Keras/Callbacks/ProgbarLogger.cs @@ -118,5 +118,8 @@ public void on_test_batch_end(long end_step, Dictionary logs) } } + public void on_test_end(Dictionary logs) + { + } } } diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index eaa9eb23c..c4761f873 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -27,7 +27,7 @@ public partial class Model /// /// /// - public Dictionary evaluate(Tensor x, Tensor y, + public Dictionary evaluate(NDArray x, NDArray y, int batch_size = -1, int verbose = 1, int steps = -1, @@ -115,62 +115,53 @@ public Dictionary evaluate(IDatasetV2 x, int verbose = 1, bool is /// The function to be called on each batch of data. /// Whether it is validation or test. 
/// - Dictionary evaluate(DataHandler data_handler, CallbackList callbacks, bool is_val, Func> test_func) + Dictionary evaluate(DataHandler data_handler, CallbackList callbacks, bool is_val, Func> test_func) { callbacks.on_test_begin(); - var results = new Dictionary(); - var logs = results; + var logs = new Dictionary(); foreach (var (epoch, iterator) in data_handler.enumerate_epochs()) { reset_metrics(); - callbacks.on_epoch_begin(epoch); - // data_handler.catch_stop_iteration(); - foreach (var step in data_handler.steps()) { callbacks.on_test_batch_begin(step); - - logs = test_func(data_handler, iterator.next()); - - tf_with(ops.control_dependencies(Array.Empty()), ctl => _train_counter.assign_add(1)); - + logs = test_func(data_handler, iterator); var end_step = step + data_handler.StepIncrement; if (!is_val) callbacks.on_test_batch_end(end_step, logs); } - - if (!is_val) - callbacks.on_epoch_end(epoch, logs); } - - foreach (var log in logs) - { - results[log.Key] = log.Value; - } - + callbacks.on_test_end(logs); + var results = new Dictionary(logs); return results; } - Dictionary test_function(DataHandler data_handler, Tensor[] data) + Dictionary test_function(DataHandler data_handler, OwnedIterator iterator) { - var (x, y) = data_handler.DataAdapter.Expand1d(data[0], data[1]); - - var y_pred = Apply(x, training: false); - var loss = compiled_loss.Call(y, y_pred); - - compiled_metrics.update_state(y, y_pred); - - var outputs = metrics.Select(x => (x.Name, x.result())).ToDictionary(x => x.Name, x => (float)x.Item2); + var data = iterator.next(); + var outputs = test_step(data_handler, data[0], data[1]); + tf_with(ops.control_dependencies(new object[0]), ctl => _test_counter.assign_add(1)); return outputs; } - Dictionary test_step_multi_inputs_function(DataHandler data_handler, Tensor[] data) + Dictionary test_step_multi_inputs_function(DataHandler data_handler, OwnedIterator iterator) { + var data = iterator.next(); var x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount; - var outputs = train_step(data_handler, new Tensors(data.Take(x_size).ToArray()), new Tensors(data.Skip(x_size).ToArray())); - tf_with(ops.control_dependencies(new object[0]), ctl => _train_counter.assign_add(1)); + var outputs = test_step(data_handler, data.Take(x_size).ToArray(), data.Skip(x_size).ToArray()); + tf_with(ops.control_dependencies(new object[0]), ctl => _test_counter.assign_add(1)); return outputs; } + + + Dictionary test_step(DataHandler data_handler, Tensors x, Tensors y) + { + (x, y) = data_handler.DataAdapter.Expand1d(x, y); + var y_pred = Apply(x, training: false); + var loss = compiled_loss.Call(y, y_pred); + compiled_metrics.update_state(y, y_pred); + return metrics.Select(x => (x.Name, x.result())).ToDictionary(x => x.Item1, x => (float)x.Item2); + } } } diff --git a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs index 68dc5976c..76c592ad6 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs @@ -266,7 +266,7 @@ History FitInternal(DataHandler data_handler, int epochs, int verbose, List Date: Fri, 30 Jun 2023 16:16:00 +0300 Subject: [PATCH 068/182] Bug fix in KerasObjectLoader.cs I added `ToArray()` so that there is no "The collection has changed" error after `_delete_tracking`. 
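A minimal sketch of the failure mode (the dictionary below is a hypothetical stand-in for `functional.UnconditionalDependencyNames`, and the `Remove` call stands in for what `_delete_tracking` does while the keys are being enumerated):

    using System;
    using System.Collections.Generic;
    using System.Linq;

    class CollectionModifiedSketch
    {
        static void Main()
        {
            // Stand-in for the tracked dependency names of a Functional model.
            var deps = new Dictionary<string, object>
            {
                ["layer_with_weights-0"] = new object(),
                ["layer-1"] = new object(),
            };

            // Enumerating deps.Keys directly while entries are removed throws
            // InvalidOperationException ("Collection was modified; enumeration operation may not execute").
            // Snapshotting the keys with ToArray() decouples the enumeration from the removals:
            foreach (var name in deps.Keys.ToArray())
                deps.Remove(name); // stands in for _delete_tracking(node, name)

            Console.WriteLine(deps.Count); // 0
        }
    }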
--- src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs b/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs index 396ad20eb..1e869d666 100644 --- a/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs +++ b/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs @@ -174,7 +174,7 @@ public void del_tracking() } if(node is Functional functional) { - foreach(var name in functional.UnconditionalDependencyNames.Keys) + foreach(var name in functional.UnconditionalDependencyNames.Keys.ToArray()) { if(Regex.Match(name, @"^layer(_with_weights)?-[\d+]").Success) { From f61ab520c91de2b25bf09356735b9617278f5a44 Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Fri, 30 Jun 2023 21:25:35 +0800 Subject: [PATCH 069/182] fix inconsistent shape error while training Embedding layer. --- src/TensorFlowNET.Core/Framework/IndexedSlices.cs | 15 ++++++++++++++- .../Layers/LayersTest.cs | 11 +++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Core/Framework/IndexedSlices.cs b/src/TensorFlowNET.Core/Framework/IndexedSlices.cs index 24d356fbb..bac5e6fb1 100644 --- a/src/TensorFlowNET.Core/Framework/IndexedSlices.cs +++ b/src/TensorFlowNET.Core/Framework/IndexedSlices.cs @@ -49,12 +49,25 @@ public IndexedSlices(Tensor values, Tensor indices, Tensor dense_shape = null) public static implicit operator Tensor(IndexedSlices indexedSlices) { - return indexedSlices.values; + return _indexed_slices_to_tensor(indexedSlices); } public static implicit operator IndexedSlices(Tensor tensor) { return tensor.Tag as IndexedSlices; } + + /// + /// Converts an IndexedSlices object `value` to a Tensor. + /// + /// + /// + /// + /// + /// + public static Tensor _indexed_slices_to_tensor(IndexedSlices indexedSlices, TF_DataType dtype = TF_DataType.DtInvalid, String name = "", bool as_ref = false) + { + return gen_math_ops.unsorted_segment_sum(indexedSlices.values, indexedSlices.indices, indexedSlices.dense_shape.slice(0)); + } } } diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs index 98d909668..7ebb53db3 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs @@ -110,6 +110,17 @@ public void Embedding() var output_array = model.predict(input_array); Assert.AreEqual((32, 10, 64), output_array.shape); } + [TestMethod] + public void EmbeddingGrad() + { + var inputs = keras.layers.Input(shape: new[] { 32, 10 }); + var outputs = keras.layers.Embedding(1000, 64, input_length: 10).Apply(inputs); + var model = keras.Model(inputs: inputs, outputs: outputs); + var input_array = np.random.randint(1000, size: (1, 32, 10)); + var output_array = np.random.random(size: (1, 32, 10, 64)); + model.compile("rmsprop", "mse", new[] { "accuracy" }); + model.fit(input_array, output_array); + } /// /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense From 3acfc1dcb0bb978e69184858712765c39c03ef0c Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Fri, 30 Jun 2023 12:50:16 -0500 Subject: [PATCH 070/182] tf.math.reduce_euclidean_norm --- src/TensorFlowNET.Core/APIs/tf.math.cs | 11 +++++++++ src/TensorFlowNET.Core/Operations/math_ops.cs | 11 +++++++++ .../ManagedAPI/MathApiTest.cs | 23 +++++++++++++++++++ 3 files changed, 45 insertions(+) diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs index 
ffbc43738..c999933cf 100644 --- a/src/TensorFlowNET.Core/APIs/tf.math.cs +++ b/src/TensorFlowNET.Core/APIs/tf.math.cs @@ -46,6 +46,17 @@ public Tensor multiply(Tensor x, Tensor y, string name = null) public Tensor divide_no_nan(Tensor a, Tensor b, string name = null) => math_ops.div_no_nan(a, b); + /// + /// Computes the Euclidean norm of elements across dimensions of a tensor. + /// + /// The tensor to reduce. Should have numeric type. + /// The dimensions to reduce. If `None` (the default), reduces all dimensions.Must be in the range `[-rank(input_tensor), rank(input_tensor))` + /// If true, retains reduced dimensions with length 1. + /// A name for the operation (optional). + /// The reduced tensor, of the same dtype as the input_tensor. + public Tensor reduce_euclidean_norm(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null) + => math_ops.reduce_euclidean_norm(input_tensor, axis: axis, keepdims: keepdims, name); + public Tensor square(Tensor x, string name = null) => math_ops.square(x, name: name); diff --git a/src/TensorFlowNET.Core/Operations/math_ops.cs b/src/TensorFlowNET.Core/Operations/math_ops.cs index d00a5d367..6d3860528 100644 --- a/src/TensorFlowNET.Core/Operations/math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/math_ops.cs @@ -587,6 +587,17 @@ public static Tensor reduce_any(Tensor input_tensor, Axis axis = null, bool keep return _may_reduce_to_scalar(keepdims, axis, max); } + public static Tensor reduce_euclidean_norm(Tensor input_tensor, Axis axis = null, bool keepdims = false, string name = null) + { + var r = _ReductionDims(input_tensor, axis); + var distance = tf.Context.ExecuteOp("EuclideanNorm", name, + new ExecuteOpArgs(input_tensor, r).SetAttributes(new + { + keep_dims = keepdims + })); + return _may_reduce_to_scalar(keepdims, axis, distance); + } + public static Tensor reduce_max(Tensor input_tensor, Axis axis = null, bool keepdims = false, string name = null) { var r = _ReductionDims(input_tensor, axis); diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/MathApiTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/MathApiTest.cs index 42ac641b1..411deb18f 100644 --- a/test/TensorFlowNET.UnitTest/ManagedAPI/MathApiTest.cs +++ b/test/TensorFlowNET.UnitTest/ManagedAPI/MathApiTest.cs @@ -1,6 +1,8 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; using System.Linq; using Tensorflow; +using Tensorflow.NumPy; using static Tensorflow.Binding; namespace TensorFlowNET.UnitTest.ManagedAPI @@ -57,5 +59,26 @@ public void Erf() var actual = erf.ToArray(); Assert.IsTrue(Equal(expected, actual)); } + + [TestMethod] + public void ReduceEuclideanNorm() + { + var x = tf.constant(new[,] { { 1, 2, 3 }, { 1, 1, 1 } }); + Assert.AreEqual(tf.math.reduce_euclidean_norm(x).numpy(), 4); + + var y = tf.constant(new[,] { { 1, 2, 3 }, { 1, 1, 1 } }, dtype: tf.float32); + Assert.IsTrue(Equal(tf.math.reduce_euclidean_norm(y).numpy(), 4.1231055f)); + + Assert.IsTrue(Equal(tf.math.reduce_euclidean_norm(y, 0).ToArray(), + new float[] { np.sqrt(2f), np.sqrt(5f), np.sqrt(10f) })); + + Assert.IsTrue(Equal(tf.math.reduce_euclidean_norm(y, 1).ToArray(), + new float[] { np.sqrt(14f), np.sqrt(3f) })); + + Assert.IsTrue(Equal(tf.math.reduce_euclidean_norm(y, 1, keepdims: true).ToArray(), + new float[] { np.sqrt(14f), np.sqrt(3f) })); + + Assert.AreEqual(tf.math.reduce_euclidean_norm(y, (0, 1)).numpy(), np.sqrt(17f)); + } } } From 4efa0a8881a5886a2eeb2e19d8a5157c3f68a32f Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Sat, 1 Jul 2023 13:44:54 +0800 
Subject: [PATCH 071/182] add pad preprocessing for `imdb` dataset --- src/TensorFlowNET.Keras/Datasets/Imdb.cs | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Keras/Datasets/Imdb.cs b/src/TensorFlowNET.Keras/Datasets/Imdb.cs index 56b0d2a77..61ce39475 100644 --- a/src/TensorFlowNET.Keras/Datasets/Imdb.cs +++ b/src/TensorFlowNET.Keras/Datasets/Imdb.cs @@ -40,6 +40,8 @@ public DatasetPass load_data(string path = "imdb.npz", int oov_char= 2, int index_from = 3) { + if (maxlen == -1) throw new InvalidArgumentError("maxlen must be assigned."); + var dst = Download(); var lines = File.ReadAllLines(Path.Combine(dst, "imdb_train.txt")); @@ -51,7 +53,7 @@ public DatasetPass load_data(string path = "imdb.npz", x_train_string[i] = lines[i].Substring(2); } - var x_train = np.array(x_train_string); + var x_train = keras.preprocessing.sequence.pad_sequences(PraseData(x_train_string), maxlen: maxlen); File.ReadAllLines(Path.Combine(dst, "imdb_test.txt")); var x_test_string = new string[lines.Length]; @@ -62,7 +64,7 @@ public DatasetPass load_data(string path = "imdb.npz", x_test_string[i] = lines[i].Substring(2); } - var x_test = np.array(x_test_string); + var x_test = keras.preprocessing.sequence.pad_sequences(PraseData(x_test_string), maxlen: maxlen); return new DatasetPass { @@ -93,5 +95,23 @@ string Download() return dst; // return Path.Combine(dst, file_name); } + + protected IEnumerable PraseData(string[] x) + { + var data_list = new List(); + for (int i = 0; i < len(x); i++) + { + var list_string = x[i]; + var cleaned_list_string = list_string.Replace("[", "").Replace("]", "").Replace(" ", ""); + string[] number_strings = cleaned_list_string.Split(','); + int[] numbers = new int[number_strings.Length]; + for (int j = 0; j < number_strings.Length; j++) + { + numbers[j] = int.Parse(number_strings[j]); + } + data_list.Add(numbers); + } + return data_list; + } } } From a76cd67d3060aabb8f658fc11146c1dc9bccaa0c Mon Sep 17 00:00:00 2001 From: Beacontownfc <19636977267@qq.com> Date: Mon, 3 Jul 2023 13:26:45 +0000 Subject: [PATCH 072/182] fix some api's bug --- src/TensorFlowNET.Core/APIs/tf.nn.cs | 12 ++---------- src/TensorFlowNET.Core/Operations/array_ops.cs | 1 - 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs index e5cd4e569..397c68c7c 100644 --- a/src/TensorFlowNET.Core/APIs/tf.nn.cs +++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs @@ -144,16 +144,8 @@ public Tensor batch_normalization(Tensor x, Tensor offset, Tensor scale, float variance_epsilon, - string name = null) - { - var inv = math_ops.rsqrt(variance + variance_epsilon); - tf_with(ops.name_scope(name, "batchnorm", (x, mean, variance, scale, offset)), scope => - { - if (scale != null) inv *= scale; - }); - if (offset != null) return x * math_ops.cast(inv, x.dtype) + math_ops.cast(offset - mean * inv, dtype: x.dtype); - else return x * math_ops.cast(inv, x.dtype) + math_ops.cast(-mean * inv, dtype: x.dtype); - } + string name = null) => nn_impl.batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name); + public Tensor max_pool(Tensor value, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string name = null) => nn_ops.max_pool(value, ksize, strides, padding, data_format: data_format, name: name); diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index 7f787533a..fbb3bf119 100644 --- 
a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -678,7 +678,6 @@ public static Tensor stop_gradient(Tensor input, string name = null) var tape = tf.GradientTape().stop_recording(); var result = gen_array_ops.stop_gradient(input, name); tape.StartRecord(); - tf.GradientTape().PushTape(tape); return result; } From f026963a7da0cf8444f05fd4abce8962b5848c62 Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Wed, 5 Jul 2023 21:46:50 +0800 Subject: [PATCH 073/182] add EinsumGrad --- src/TensorFlowNET.Core/Gradients/math_grad.cs | 131 ++++++++++++++++++ 1 file changed, 131 insertions(+) diff --git a/src/TensorFlowNET.Core/Gradients/math_grad.cs b/src/TensorFlowNET.Core/Gradients/math_grad.cs index be1fbbba7..8c3f0f8bd 100644 --- a/src/TensorFlowNET.Core/Gradients/math_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/math_grad.cs @@ -117,6 +117,137 @@ public static Tensor[] _DivNoNanGrad(Operation op, Tensor[] grads) }; } + public static string ellipsis = "..."; + [RegisterGradient("Einsum")] + public static Tensor[] _EinsumGrad(Operation op, Tensor[] grads) + { + // Gradient for Einsum. + string equation = (string)op.get_attr("equation"); + string[] split_equation = equation.Split(new string[] { "->" }, StringSplitOptions.None); + var input_subs = split_equation[0]; + var output_subs = split_equation[1]; + + if (op.inputs.Length == 1) + { + var input_shape = array_ops.shape(op.inputs[0]); + var reduced_label_set = new HashSet(new HashSet(input_subs).Except(new HashSet(output_subs + ellipsis))); + if (reduced_label_set.Count == 0) + return new Tensor[] { math_ops.einsum(string.Format("{0}->{1}", output_subs, input_subs), new Tensors(grads)) }; + return new Tensor[] { _GetGradReduced(new Tensors(grads), output_subs, input_subs, input_shape, reduced_label_set) }; + } + + string[] split_input_subs = input_subs.Split(new string[] { "," }, StringSplitOptions.None); + var x_subs = split_input_subs[0]; + var y_subs = split_input_subs[1]; + // Add ellipsis for broadcasted dimensions if any operand does not have it. + // This is because the equation "...ij,jk->ik" may be valid if the 0th input's + // batch shape is empty, but the VJP equation "jk,ik->...ij" is not valid + // because only the output subscripts contain ellipsis. + if (output_subs.Contains(ellipsis)) + { + if (!x_subs.Contains(ellipsis)) + x_subs += ellipsis; + if (!y_subs.Contains(ellipsis)) + y_subs += ellipsis; + } + // Obtain the gradients wrt the inputs x and y, without taking into account + // the unbroadcasting. 
+ var x = op.inputs[0]; + var y = op.inputs[1]; + if (grads.GetDataType().is_complex()) + { + x = math_ops.conj(x); + y = math_ops.conj(y); + } + + var x_shape = array_ops.shape(x); + var y_shape = array_ops.shape(y); + var grad_x = _GetGradWrt(grads, y, x_shape, x_subs, y_subs, output_subs); + var grad_y = _GetGradWrt(grads, x, y_shape, y_subs, x_subs, output_subs); + + if (!output_subs.Contains(ellipsis)) + return new Tensor[] { grad_x, grad_y }; + var bx = _GetBcastSubshape(x_subs); + int bx_start = bx[0], bx_end = bx[1]; + var by = _GetBcastSubshape(y_subs); + int by_start = by[0], by_end = by[1]; + + var x_shape_static = x.shape; + var y_shape_static = y.shape; + if(x_shape_static.IsFullyDefined && + y_shape_static.IsFullyDefined && + x_shape_static[string.Format("{0}:{1}",bx_start,bx_end)] == y_shape_static[string.Format("{0}:{1}", by_start, by_end)]) + return new Tensor[] { grad_x, grad_y }; + + var r = gen_array_ops.broadcast_gradient_args(x_shape[string.Format("{0}:{1}", bx_start, bx_end)], + y_shape[string.Format("{0}:{1}", by_start, by_end)]); + var rx = r[0]; + var ry = r[1]; + grad_x = array_ops.reshape(math_ops.reduce_sum(grad_x, bx_start + rx), x_shape); + grad_y = array_ops.reshape(math_ops.reduce_sum(grad_y, by_start + ry), y_shape); + return new Tensor[] { grad_x, grad_y }; + } + protected static Tensor _GetGradWrt(Tensor[] output_grads, Tensor other_operand, Tensor input_shape, + string input_subs, string other_subs, string output_subs) + { + var reduced_label_set = new HashSet(new HashSet(input_subs).Except(new HashSet(output_subs + other_subs + "."))); + var left_subs = string.Join("", input_subs.Where(s => !reduced_label_set.Contains(s))); + var grad_reduced = math_ops.einsum(string.Format("{0},{1}->{2}", output_subs, other_subs, left_subs), new Tensors((Tensors)output_grads, other_operand)); + if (reduced_label_set.Count == 0) + return grad_reduced; + return _GetGradReduced(grad_reduced, left_subs, input_subs, input_shape, reduced_label_set); + } + protected static Tensor _GetGradReduced(Tensor output_grad, string output_subs, string input_subs, Tensor input_shape, HashSet reduced_label_set) + { + string reduced_subs; + Tensor reduced_dims; + List reduced_axes; + _GetReducedSubscripts(reduced_label_set, input_shape, input_subs, out reduced_subs, out reduced_dims, out reduced_axes); + bool has_repeated_labels = ( + new HashSet(input_subs).Count + new HashSet(output_subs).Count < + input_subs.Length + output_subs.Length); + var input_subs_without_reduced_labels = string.Join("", input_subs.Where(s => !reduced_label_set.Contains(s))); + + if (!has_repeated_labels && input_subs_without_reduced_labels == output_subs) + { + var reduced_shape = math_ops.reduced_shape(input_shape, ops.convert_to_tensor(reduced_axes)); + return gen_array_ops.broadcast_to(array_ops.reshape(output_grad, reduced_shape), input_shape); + } + else + { + var grad_shape_with_reduced_labels = array_ops.concat(new Tensor[] { reduced_dims, array_ops.shape(new Tensors(output_grad)) }, axis: 0); + var reduced_shape = array_ops.concat(new Tensor[] { array_ops.ones(reduced_label_set.Count, dtype: dtypes.int32), array_ops.shape(new Tensors(output_grad)) }, axis: 0); + var broadcasted_grad = gen_array_ops.broadcast_to(array_ops.reshape(output_grad, reduced_shape), grad_shape_with_reduced_labels); + return math_ops.einsum(string.Format("{0}->{1}", reduced_subs + output_subs, input_subs), new Tensors(broadcasted_grad)); + } + } + protected static void _GetReducedSubscripts(HashSet reduced_label_set, Tensor 
input_shape, string subscripts, out string reduced_subs, out Tensor reduced_dims, out List reduced_axes) + { + reduced_subs = string.Join("", reduced_label_set.Select(c => c.ToString())); + reduced_axes = reduced_subs.Select(s => _GetAxisFromLabel(subscripts, s)).ToList(); + reduced_dims = array_ops.stack(reduced_axes.Select(ax => input_shape[ax]).ToList()); + } + protected static int _GetAxisFromLabel(string subscripts, char label) + { + var splits = subscripts.Split(new string[] { ellipsis }, StringSplitOptions.None); + var index = splits[0].IndexOf(label); + if (index != -1) return index; + if (splits.Length < 2) throw new OutOfRangeError(); + index = splits[1].IndexOf(label); + if (index != -1) return index; + throw new ValueError(); + } + protected static int[] _GetBcastSubshape(string subscripts) + { + int start = subscripts.IndexOf(ellipsis); + if (start == -1) return new int[] { 0, 0 }; + int remaining = subscripts.Length - (start + ellipsis.Length); + int end; + if (remaining > 0) end = remaining; + else throw new Exception(); + return new int[] { start, end }; + } + /// /// Returns grad * exp(x). /// From 6862d3a0432b0623bba23e51c14d42ac1974e22f Mon Sep 17 00:00:00 2001 From: Beacontownfc <19636977267@qq.com> Date: Fri, 7 Jul 2023 00:25:38 +0000 Subject: [PATCH 074/182] Add AdamW optimizer --- src/TensorFlowNET.Core/Keras/IOptimizerApi.cs | 21 ++++++ src/TensorFlowNET.Keras/Optimizers/AdamW.cs | 67 +++++++++++++++++++ .../Optimizers/OptimizerApi.cs | 16 +++++ 3 files changed, 104 insertions(+) create mode 100644 src/TensorFlowNET.Keras/Optimizers/AdamW.cs diff --git a/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs b/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs index 961ce91ae..d0d3a74f1 100644 --- a/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs +++ b/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs @@ -25,6 +25,27 @@ IOptimizer Adam(float learning_rate = 0.001f, bool amsgrad = false, string name = "Adam"); + /// + /// Adam enables L2 weight decay on gradients. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + IOptimizer AdamW(float learning_rate = 0.001f, + float weight_decay = 0.004f, + float beta_1 = 0.9f, + float beta_2 = 0.999f, + float epsilon = 1e-7f, + bool amsgrad = false, + List no_decay_params = null, + string name = "AdamW"); + /// /// Construct a new RMSprop optimizer. 
/// diff --git a/src/TensorFlowNET.Keras/Optimizers/AdamW.cs b/src/TensorFlowNET.Keras/Optimizers/AdamW.cs new file mode 100644 index 000000000..469b8ad28 --- /dev/null +++ b/src/TensorFlowNET.Keras/Optimizers/AdamW.cs @@ -0,0 +1,67 @@ +namespace Tensorflow.Keras.Optimizers +{ + public class AdamW : Adam + { + string name; + float weight_decay; + DeviceDType deType; + List no_decay_params = null; + public AdamW(float learning_rate= 0.001f, + float weight_decay= 0.004f, + float beta_1= 0.9f, + float beta_2= 0.999f, + float epsilon= 1e-7f, + bool amsgrad = false, + List no_decay_params = null, + string name= "AdamW") : base(learning_rate, beta_1, beta_2, epsilon, amsgrad) + { + this.name = name; + this.weight_decay = weight_decay; + this.no_decay_params = no_decay_params; + } + + protected Operation _decay_weights_op(IVariableV1 var, float learning_rate, Dictionary> apply_state) + { + var device_dtype = new DeviceDType(); + device_dtype.DType = var.dtype; + device_dtype.Device = var.Device; + bool do_decay = _do_use_weight_decay(var.Name); + if (do_decay) return var.assign_add( + -learning_rate * var.AsTensor() * apply_state[deType]["weight_decay"]); + return tf.no_op(); + } + + + protected bool _do_use_weight_decay(string param_name) + { + // Whether to use L2 weight decay for `param_name`. + if (this.weight_decay == 0) + return false; + + if (this.no_decay_params != null) + { + foreach (var name in no_decay_params) + { + if (param_name.Contains(name)) return false; + } + + } + return true; + } + + protected override Operation _resource_apply_dense(IVariableV1 var, Tensor grad, Dictionary> apply_state) + { + var decay = _decay_weights_op(var, _hyper["learning_rate"], apply_state); + tf.control_dependencies(new[] { decay }); + return base._resource_apply_dense(var, grad, apply_state); + } + + protected override void _prepare_local(DeviceDType device_dtype, Dictionary> apply_state) + { + this.deType = device_dtype; + base._prepare_local(device_dtype, apply_state); + apply_state[device_dtype]["weight_decay"] = tf.constant( + weight_decay, name: "adam_weight_decay_rate"); + } + } +} diff --git a/src/TensorFlowNET.Keras/Optimizers/OptimizerApi.cs b/src/TensorFlowNET.Keras/Optimizers/OptimizerApi.cs index 31eb88be7..280694268 100644 --- a/src/TensorFlowNET.Keras/Optimizers/OptimizerApi.cs +++ b/src/TensorFlowNET.Keras/Optimizers/OptimizerApi.cs @@ -29,6 +29,22 @@ public IOptimizer Adam(float learning_rate = 0.001f, amsgrad: amsgrad, name: name); + public IOptimizer AdamW(float learning_rate = 0.001f, + float weight_decay = 0.004f, + float beta_1 = 0.9f, + float beta_2 = 0.999f, + float epsilon = 1e-7f, + bool amsgrad = false, + List no_decay_params = null, + string name = "AdamW") => new AdamW(learning_rate: learning_rate, + beta_1: beta_1, + beta_2: beta_2, + epsilon: epsilon, + amsgrad: amsgrad, + name: name, + weight_decay: weight_decay, + no_decay_params: no_decay_params); + /// /// Construct a new RMSprop optimizer. 
/// From cc6ddc144fa85010b111df2b4c596c7230052080 Mon Sep 17 00:00:00 2001 From: Beacontownfc <19636977267@qq.com> Date: Fri, 7 Jul 2023 00:33:41 +0000 Subject: [PATCH 075/182] Add AdamW optimizer --- src/TensorFlowNET.Keras/Optimizers/AdamW.cs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/TensorFlowNET.Keras/Optimizers/AdamW.cs b/src/TensorFlowNET.Keras/Optimizers/AdamW.cs index 469b8ad28..d111b5d3a 100644 --- a/src/TensorFlowNET.Keras/Optimizers/AdamW.cs +++ b/src/TensorFlowNET.Keras/Optimizers/AdamW.cs @@ -1,4 +1,4 @@ -namespace Tensorflow.Keras.Optimizers +namespace Tensorflow.Keras.Optimizers { public class AdamW : Adam { @@ -22,9 +22,6 @@ public AdamW(float learning_rate= 0.001f, protected Operation _decay_weights_op(IVariableV1 var, float learning_rate, Dictionary> apply_state) { - var device_dtype = new DeviceDType(); - device_dtype.DType = var.dtype; - device_dtype.Device = var.Device; bool do_decay = _do_use_weight_decay(var.Name); if (do_decay) return var.assign_add( -learning_rate * var.AsTensor() * apply_state[deType]["weight_decay"]); From 42e8b046ea53f3173ac92fe7cbc1d57d3d796fa8 Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Fri, 7 Jul 2023 21:25:41 -0500 Subject: [PATCH 076/182] Release v0.110.1. --- src/TensorFlowNET.Core/APIs/tf.array.cs | 3 +-- src/TensorFlowNET.Core/Operations/array_ops.cs | 16 +--------------- src/TensorFlowNET.Core/Operations/math_ops.cs | 11 +++-------- src/TensorFlowNET.Core/Tensorflow.Binding.csproj | 6 +++--- src/TensorFlowNET.Core/ops.cs | 8 +++++++- src/TensorFlowNET.Keras/Tensorflow.Keras.csproj | 6 +++--- 6 files changed, 18 insertions(+), 32 deletions(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.array.cs b/src/TensorFlowNET.Core/APIs/tf.array.cs index 6a646512a..ecac37eb1 100644 --- a/src/TensorFlowNET.Core/APIs/tf.array.cs +++ b/src/TensorFlowNET.Core/APIs/tf.array.cs @@ -91,8 +91,7 @@ public Tensor concat(IEnumerable values, int axis, string name = "concat return identity(values.First(), name: scope); }); } - - return gen_array_ops.concat_v2(values.ToArray(), ops.convert_to_tensor(axis), name: name); + return array_ops.concat(values.ToArray(), axis, name: name); } /// diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index fbb3bf119..5237ec446 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -892,23 +892,9 @@ public static Tensor broadcast_static_shape(Tensor shape_x, Tensor shape_y) /// /// /// - public static Tensor concat(Tensor[] values, int axis, string name = "concat") - { - if (values.Length == 1) // Degenerate case of one tensor. 
- { - return tf_with(ops.name_scope(name), scope => - { - var t = ops.convert_to_tensor(axis, name: "concat_dim", dtype: TF_DataType.TF_INT32); - return identity(values[0], name: scope); - }); - } - - return gen_array_ops.concat_v2(values, ops.convert_to_tensor(axis), name: name); - } - public static Tensor concat(Tensor[] values, Tensor axis, string name = "concat") { - return gen_array_ops.concat_v2(values, axis, name: name); + return tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis)); } public static Tensor concat(object[] values, int axis, string name = "concat") diff --git a/src/TensorFlowNET.Core/Operations/math_ops.cs b/src/TensorFlowNET.Core/Operations/math_ops.cs index 6d3860528..092137bf2 100644 --- a/src/TensorFlowNET.Core/Operations/math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/math_ops.cs @@ -791,10 +791,7 @@ public static Tensor matmul(Tensor a, Tensor b, bool adjoint_a = false, bool adjoint_b = false, bool a_is_sparse = false, bool b_is_sparse = false, string name = null) - { - Tensor result = null; - - tf_with(ops.name_scope(name, "MatMul", new Tensor[] { a, b }), scope => + => tf_with(ops.name_scope(name, "MatMul", (a, b)), scope => { name = scope; @@ -815,12 +812,10 @@ public static Tensor matmul(Tensor a, Tensor b, transpose_b = true; } - result = gen_math_ops.mat_mul(a, b, transpose_a, transpose_b, name); + return tf.Context.ExecuteOp("MatMul", name, new ExecuteOpArgs(a, b) + .SetAttributes(new { transpose_a, transpose_b })); }); - return result; - } - public static Tensor batch_matmul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string name = null) diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj index 3bc20289a..6a2dcff7d 100644 --- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj +++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj @@ -5,7 +5,7 @@ Tensorflow.Binding Tensorflow 2.10.0 - 0.110.0 + 0.110.1 10.0 enable Haiping Chen, Meinrad Recheis, Eli Belash @@ -20,7 +20,7 @@ Google's TensorFlow full binding in .NET Standard. Building, training and infering deep learning models. https://tensorflownet.readthedocs.io - 0.110.0.0 + 0.110.1.0 tf.net 0.110.x and above are based on tensorflow native 2.11.0 * RNN, LSTM works. @@ -42,7 +42,7 @@ https://tensorflownet.readthedocs.io tf.net 0.10x.x aligns with TensorFlow v2.10.x native library. tf.net 0.11x.x aligns with TensorFlow v2.11.x native library. 
- 0.110.0.0 + 0.110.1.0 LICENSE true packages diff --git a/src/TensorFlowNET.Core/ops.cs b/src/TensorFlowNET.Core/ops.cs index 7bd78a79f..2dc463296 100644 --- a/src/TensorFlowNET.Core/ops.cs +++ b/src/TensorFlowNET.Core/ops.cs @@ -138,9 +138,15 @@ public static Tensor convert_to_tensor(object value, else { var graph = get_default_graph(); + if (graph is FuncGraph funcGraph) + { + return funcGraph.capture(eager_tensor, name: name); + } if (!graph.building_function) + { throw new RuntimeError("Attempting to capture an EagerTensor without building a function."); - return (graph as FuncGraph).capture(eager_tensor, name: name); + // return eager_tensor.AsPlaceholder(name: name); + } } } diff --git a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj index 5dc46fe49..ab667519e 100644 --- a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj +++ b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj @@ -7,7 +7,7 @@ enable Tensorflow.Keras AnyCPU;x64 - 0.11.0 + 0.11.1 Haiping Chen Keras for .NET Apache 2.0, Haiping Chen 2023 @@ -38,8 +38,8 @@ Keras is an API designed for human beings, not machines. Keras follows best prac Git true Open.snk - 0.11.0.0 - 0.11.0.0 + 0.11.1.0 + 0.11.1.0 LICENSE Debug;Release;GPU From 992bf55dab0273de568e8347d29fdc19e3ad4aa0 Mon Sep 17 00:00:00 2001 From: Beacontownfc <19636977267@qq.com> Date: Sat, 8 Jul 2023 02:39:06 +0000 Subject: [PATCH 077/182] fix load_weights --- src/TensorFlowNET.Keras/Saving/hdf5_format.cs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Keras/Saving/hdf5_format.cs b/src/TensorFlowNET.Keras/Saving/hdf5_format.cs index b04391be9..8ac9fddf6 100644 --- a/src/TensorFlowNET.Keras/Saving/hdf5_format.cs +++ b/src/TensorFlowNET.Keras/Saving/hdf5_format.cs @@ -7,6 +7,8 @@ using static Tensorflow.Binding; using static Tensorflow.KerasApi; using System.Linq; +using System.Text.RegularExpressions; + namespace Tensorflow.Keras.Saving { public class hdf5_format @@ -132,7 +134,9 @@ public static void load_weights_from_hdf5_group(long f, List layers) var weight_names = load_attributes_from_hdf5_group(g, "weight_names"); foreach (var i_ in weight_names) { - (success, Array result) = Hdf5.ReadDataset(g, i_); + var vm = Regex.Replace(i_, "/", "$"); + vm = i_.Split('/')[0] + "/$" + vm.Substring(i_.Split('/')[0].Length + 1, i_.Length - i_.Split('/')[0].Length - 1); + (success, Array result) = Hdf5.ReadDataset(g, vm); if (success) weight_values.Add(np.array(result)); } @@ -193,7 +197,8 @@ public static void save_weights_to_hdf5_group(long f, List layers) if (name.IndexOf("/") > 1) { var crDataGroup = Hdf5.CreateOrOpenGroup(g, Hdf5Utils.NormalizedName(name.Split('/')[0])); - WriteDataset(crDataGroup, name.Split('/')[1], tensor); + var _name = Regex.Replace(name.Substring(name.Split('/')[0].Length, name.Length - name.Split('/')[0].Length), "/", "$"); + WriteDataset(crDataGroup, _name, tensor); Hdf5.CloseGroup(crDataGroup); } else From f01558b642cc7719ac19296374cb897f337300cf Mon Sep 17 00:00:00 2001 From: BalashovK Date: Sat, 8 Jul 2023 15:39:08 -0700 Subject: [PATCH 078/182] exp moved to tf.math.cs --- src/TensorFlowNET.Core/APIs/tf.exp.cs | 25 ------------------------- src/TensorFlowNET.Core/APIs/tf.math.cs | 2 ++ 2 files changed, 2 insertions(+), 25 deletions(-) delete mode 100644 src/TensorFlowNET.Core/APIs/tf.exp.cs diff --git a/src/TensorFlowNET.Core/APIs/tf.exp.cs b/src/TensorFlowNET.Core/APIs/tf.exp.cs deleted file mode 100644 index 56ea1898e..000000000 --- 
a/src/TensorFlowNET.Core/APIs/tf.exp.cs +++ /dev/null @@ -1,25 +0,0 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -namespace Tensorflow -{ - public partial class tensorflow - { - public Tensor exp(Tensor x, - string name = null) => gen_math_ops.exp(x, name); - - } -} diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs index c999933cf..da54a9dd7 100644 --- a/src/TensorFlowNET.Core/APIs/tf.math.cs +++ b/src/TensorFlowNET.Core/APIs/tf.math.cs @@ -622,5 +622,7 @@ public Tensor squared_difference(Tensor x, Tensor y, string name = null) => gen_math_ops.squared_difference(x: x, y: y, name: name); public Tensor complex(Tensor real, Tensor imag, Tensorflow.TF_DataType? dtype = null, string name = null) => gen_ops.complex(real, imag, dtype, name); + public Tensor exp(Tensor x, + string name = null) => gen_math_ops.exp(x, name); } } From 4b8e63bb8213ca969b7d03ff3aa76c189f5c1b99 Mon Sep 17 00:00:00 2001 From: BalashovK Date: Sat, 8 Jul 2023 15:39:08 -0700 Subject: [PATCH 079/182] fix: exp moved to tf.math.cs --- src/TensorFlowNET.Core/APIs/tf.exp.cs | 25 ------------------------- src/TensorFlowNET.Core/APIs/tf.math.cs | 2 ++ 2 files changed, 2 insertions(+), 25 deletions(-) delete mode 100644 src/TensorFlowNET.Core/APIs/tf.exp.cs diff --git a/src/TensorFlowNET.Core/APIs/tf.exp.cs b/src/TensorFlowNET.Core/APIs/tf.exp.cs deleted file mode 100644 index 56ea1898e..000000000 --- a/src/TensorFlowNET.Core/APIs/tf.exp.cs +++ /dev/null @@ -1,25 +0,0 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-******************************************************************************/ - -namespace Tensorflow -{ - public partial class tensorflow - { - public Tensor exp(Tensor x, - string name = null) => gen_math_ops.exp(x, name); - - } -} diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs index c999933cf..da54a9dd7 100644 --- a/src/TensorFlowNET.Core/APIs/tf.math.cs +++ b/src/TensorFlowNET.Core/APIs/tf.math.cs @@ -622,5 +622,7 @@ public Tensor squared_difference(Tensor x, Tensor y, string name = null) => gen_math_ops.squared_difference(x: x, y: y, name: name); public Tensor complex(Tensor real, Tensor imag, Tensorflow.TF_DataType? dtype = null, string name = null) => gen_ops.complex(real, imag, dtype, name); + public Tensor exp(Tensor x, + string name = null) => gen_math_ops.exp(x, name); } } From b968fd79ab156bfca62f434c7fb936e2ed512455 Mon Sep 17 00:00:00 2001 From: dogvane Date: Mon, 10 Jul 2023 00:41:23 +0800 Subject: [PATCH 080/182] add avg_pool_grad function --- src/TensorFlowNET.Core/Gradients/nn_grad.cs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/TensorFlowNET.Core/Gradients/nn_grad.cs b/src/TensorFlowNET.Core/Gradients/nn_grad.cs index a1ac97a97..3a6efd540 100644 --- a/src/TensorFlowNET.Core/Gradients/nn_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/nn_grad.cs @@ -365,6 +365,23 @@ public static Tensor[] _MaxPoolGrad(Operation op, Tensor[] grads) }; } + [RegisterGradient("AvgPool")] + public static Tensor[] _AvgPoolGrad(Operation op, Tensor[] grads) + { + Tensor grad = grads[0]; + + return new Tensor[] + { + gen_nn_ops.avg_pool_grad( + array_ops.shape(op.inputs[0]), + grad, + op.get_attr_list("ksize"), + op.get_attr_list("strides"), + op.get_attr("padding").ToString(), + op.get_attr("data_format").ToString()) + }; + } + /// /// Return the gradients for TopK. /// From fa213eb54c2b3c1b28d9ca4ebc2a49d90a0e46bf Mon Sep 17 00:00:00 2001 From: dogvane Date: Mon, 10 Jul 2023 00:52:15 +0800 Subject: [PATCH 081/182] change "bool training" => "bool? training" the bool to tensor has a bug, if in init the training is False, the program not start. --- src/TensorFlowNET.Core/Keras/Layers/ILayer.cs | 2 +- src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs | 2 +- src/TensorFlowNET.Keras/Engine/Layer.Apply.cs | 2 +- src/TensorFlowNET.Keras/Engine/Model.Fit.cs | 8 ++++++-- src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs | 2 +- 5 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs index e94c8bf10..2f92c4e57 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs @@ -15,7 +15,7 @@ public interface ILayer: IWithTrackable, IKerasConfigable List Layers { get; } List InboundNodes { get; } List OutboundNodes { get; } - Tensors Apply(Tensors inputs, Tensors states = null, bool training = false, IOptionalArgs? optional_args = null); + Tensors Apply(Tensors inputs, Tensors states = null, bool? training = false, IOptionalArgs? 
optional_args = null); List TrainableVariables { get; } List TrainableWeights { get; } List NonTrainableWeights { get; } diff --git a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs index e488c47e7..4e99731f9 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs @@ -145,7 +145,7 @@ private Tensor _zero_state_tensors(object state_size, Tensor batch_size, TF_Data throw new NotImplementedException("_zero_state_tensors"); } - public Tensors Apply(Tensors inputs, Tensors state = null, bool is_training = false, IOptionalArgs? optional_args = null) + public Tensors Apply(Tensors inputs, Tensors state = null, bool? is_training = false, IOptionalArgs? optional_args = null) { throw new NotImplementedException(); } diff --git a/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs b/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs index d52190fd3..8a66948b9 100644 --- a/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs +++ b/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs @@ -13,7 +13,7 @@ public partial class Layer /// /// /// - public virtual Tensors Apply(Tensors inputs, Tensors states = null, bool training = false, IOptionalArgs? optional_args = null) + public virtual Tensors Apply(Tensors inputs, Tensors states = null, bool? training = false, IOptionalArgs? optional_args = null) { if (callContext.Value == null) callContext.Value = new CallContext(); diff --git a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs index 76c592ad6..de57f19ae 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs @@ -142,6 +142,7 @@ public History fit(IDatasetV2 dataset, int verbose = 1, List callbacks = null, IDatasetV2 validation_data = null, + int validation_step = 10, // run validation once every this many epochs bool shuffle = true, int initial_epoch = 0, int max_queue_size = 10, @@ -164,11 +165,11 @@ public History fit(IDatasetV2 dataset, }); - return FitInternal(data_handler, epochs, verbose, callbacks, validation_data: validation_data, + return FitInternal(data_handler, epochs, validation_step, verbose, callbacks, validation_data: validation_data, train_step_func: train_step_function); } - History FitInternal(DataHandler data_handler, int epochs, int verbose, List callbackList, IDatasetV2 validation_data, + History FitInternal(DataHandler data_handler, int epochs, int validation_step, int verbose, List callbackList, IDatasetV2 validation_data, Func> train_step_func) { stop_training = false; @@ -207,6 +208,9 @@ History FitInternal(DataHandler data_handler, int epochs, int verbose, List 0 && epoch ==0 || (epoch) % validation_step != 0) + continue; + var val_logs = evaluate(validation_data); foreach(var log in val_logs) { diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs index f86de8a85..0ca62c391 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs @@ -393,7 +393,7 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo } } - public override Tensors Apply(Tensors inputs, Tensors initial_states = null, bool training = false, IOptionalArgs? optional_args = null) + public override Tensors Apply(Tensors inputs, Tensors initial_states = null, bool? training = false, IOptionalArgs? optional_args = null) { RnnOptionalArgs?
rnn_optional_args = optional_args as RnnOptionalArgs; if (optional_args is not null && rnn_optional_args is null) From 7165304ff8b609f40bcbb24c0912a370d2c811ae Mon Sep 17 00:00:00 2001 From: dogvane Date: Mon, 10 Jul 2023 00:53:55 +0800 Subject: [PATCH 082/182] add a function to convert a folder into an image classification dataset. --- ...processing.image_dataset_from_directory.cs | 1 + ...eprocessing.paths_and_labels_to_dataset.cs | 25 +++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs index 4acae4265..f42d12cde 100644 --- a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs +++ b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs @@ -58,6 +58,7 @@ public IDatasetV2 image_dataset_from_directory(string directory, if (shuffle) dataset = dataset.shuffle(batch_size * 8, seed: seed); dataset = dataset.batch(batch_size); + dataset.class_names = class_name_list; return dataset; } diff --git a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs index b4d583878..eaa762d89 100644 --- a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs +++ b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs @@ -6,6 +6,31 @@ namespace Tensorflow.Keras { public partial class Preprocessing { + + /// + /// Convert image paths into a dataset for data processing + /// + /// + /// + /// + /// The interpolation method used for resizing. Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`. + /// Defaults to `'bilinear'`. + /// + /// + public IDatasetV2 paths_to_dataset(string[] image_paths, + Shape image_size, + int num_channels = 3, + int num_classes = 6, + string interpolation = "bilinear") + { + var path_ds = tf.data.Dataset.from_tensor_slices(image_paths); + var img_ds = path_ds.map(x => path_to_image(x, image_size, num_channels, interpolation)); + var label_ds = dataset_utils.labels_to_dataset(new int[num_classes] , "", num_classes); + + return img_ds; + } + public IDatasetV2 paths_and_labels_to_dataset(string[] image_paths, Shape image_size, int num_channels, From b2fe5ca080dba6bd6473e386696d7c84191dc7ba Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Sun, 9 Jul 2023 15:35:23 -0500 Subject: [PATCH 083/182] Fix LSTM crash in release mode.
--- src/TensorFlowNET.Core/Gradients/nn_grad.cs | 4 +- .../Operations/Operation.cs | 9 +-- .../Tensorflow.Binding.csproj | 72 +++++++++++++++-- .../Tensors/Tensor.Index.cs | 3 +- src/TensorFlowNET.Keras/Activations.cs | 1 - .../Callbacks/Earlystopping.cs | 3 - .../DataAdapters/TensorLikeDataAdapter.cs | 2 +- src/TensorFlowNET.Keras/Engine/Layer.Apply.cs | 2 +- .../Layer.FunctionalConstructionCall.cs | 10 --- .../Engine/Model.Evaluate.cs | 2 +- src/TensorFlowNET.Keras/Engine/Model.Train.cs | 4 +- src/TensorFlowNET.Keras/KerasInterface.cs | 4 +- .../Layers/Attention/BaseDenseAttention.cs | 10 +-- src/TensorFlowNET.Keras/Layers/LayersApi.cs | 3 - .../Layers/Reshaping/Cropping1D.cs | 1 - src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs | 5 +- ...tUtils.get_training_or_validation_split.cs | 2 +- .../Saving/KerasObjectLoader.cs | 1 - .../Tensorflow.Keras.csproj | 79 +++++++++++++++++-- 19 files changed, 160 insertions(+), 57 deletions(-) diff --git a/src/TensorFlowNET.Core/Gradients/nn_grad.cs b/src/TensorFlowNET.Core/Gradients/nn_grad.cs index 3a6efd540..a43a91b9a 100644 --- a/src/TensorFlowNET.Core/Gradients/nn_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/nn_grad.cs @@ -377,8 +377,8 @@ public static Tensor[] _AvgPoolGrad(Operation op, Tensor[] grads) grad, op.get_attr_list("ksize"), op.get_attr_list("strides"), - op.get_attr("padding").ToString(), - op.get_attr("data_format").ToString()) + op.get_attr("padding"), + op.get_attr("data_format")) }; } diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index d31b26d4a..e59c381cb 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -206,12 +206,11 @@ internal unsafe TF_DataType _get_attr_type(string name) return result; } - internal unsafe int _get_attr_int(string name) + internal unsafe long _get_attr_int(string name) { - Status status = new(); - int result; - c_api.TF_OperationGetAttrInt(_handle, name, new IntPtr(&result), status); - status.Check(true); + long result; + c_api.TF_OperationGetAttrInt(_handle, name, new IntPtr(&result), tf.Status); + tf.Status.Check(true); return result; } diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj index 6a2dcff7d..ca5aa47a9 100644 --- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj +++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj @@ -4,11 +4,11 @@ netstandard2.0;net6.0 Tensorflow.Binding Tensorflow - 2.10.0 - 0.110.1 + 2.11.0 + 0.110.2 10.0 enable - Haiping Chen, Meinrad Recheis, Eli Belash + Haiping Chen, Eli Belash, Yaohui Liu, Meinrad Recheis SciSharp STACK False Apache 2.0, Haiping Chen $([System.DateTime]::UtcNow.ToString(yyyy)) @@ -23,7 +23,8 @@ https://tensorflownet.readthedocs.io 0.110.1.0 tf.net 0.110.x and above are based on tensorflow native 2.11.0 - * RNN, LSTM works. + * Support RNN, LSTM model. + * Support Transformer model. tf.net 0.100.x and above are based on tensorflow native 2.10.0 @@ -42,12 +43,11 @@ https://tensorflownet.readthedocs.io tf.net 0.10x.x aligns with TensorFlow v2.10.x native library. tf.net 0.11x.x aligns with TensorFlow v2.11.x native library. 
- 0.110.1.0 + 0.110.2.0 LICENSE true packages true - Open.snk AnyCPU;x64 TensorFlow.NET Debug;Release;GPU @@ -88,6 +88,66 @@ https://tensorflownet.readthedocs.io + + 1 + $(NoWarn),1570,1573,1591,1712,8603,8604,8625,CS0612 + + + + 1 + $(NoWarn),1570,1573,1591,1712,8603,8604,8625,CS0612 + + + + 1 + $(NoWarn),1570,1573,1591,1712,8603,8604,8625,CS0612 + + + + 1 + $(NoWarn),1570,1573,1591,1712,8603,8604,8625,CS0612 + + + + 1 + $(NoWarn),1570,1573,1591,1712,8603,8604,8625,CS0612 + + + + 1 + $(NoWarn),1570,1573,1591,1712,8603,8604,8625,CS0612 + + + + 1 + $(NoWarn),1570,1573,1591,1712,8603,8604,8625,CS0612 + + + + 1 + $(NoWarn),1570,1573,1591,1712,8603,8604,8625,CS0612 + + + + 1 + $(NoWarn),1570,1573,1591,1712,8603,8604,8625,CS0612 + + + + 1 + $(NoWarn),1570,1573,1591,1712,8603,8604,8625,CS0612 + + + + 1 + $(NoWarn),1570,1573,1591,1712,8603,8604,8625,CS0612 + + + + 1 + $(NoWarn),1570,1573,1591,1712,8603,8604,8625,CS0612 + + diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Index.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Index.cs index c8f47825c..217712fef 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Index.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Index.cs @@ -180,8 +180,7 @@ public Tensor slice(int start) array_ops.stack(end.ToArray()), array_ops.stack(strides.ToArray())); - return gen_array_ops.strided_slice( - this, + return array_ops.strided_slice(this, packed_begin, packed_end, packed_strides, diff --git a/src/TensorFlowNET.Keras/Activations.cs b/src/TensorFlowNET.Keras/Activations.cs index d6d8e3914..ce5b4eb13 100644 --- a/src/TensorFlowNET.Keras/Activations.cs +++ b/src/TensorFlowNET.Keras/Activations.cs @@ -44,7 +44,6 @@ public class Activations: IActivationsApi /// /// Register the name-activation mapping in this static class. /// - /// /// private static void RegisterActivation(Activation activation) { diff --git a/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs b/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs index b3b78423c..36993b637 100644 --- a/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs +++ b/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs @@ -5,9 +5,6 @@ namespace Tensorflow.Keras.Callbacks; /// /// Stop training when a monitored metric has stopped improving. /// -/// -/// - public class EarlyStopping: ICallback { int _paitence; diff --git a/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs b/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs index b93c6aed7..16e646a35 100644 --- a/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs +++ b/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs @@ -52,7 +52,7 @@ Tensors permutation(Tensors tensor) /// /// Convert a Tensor of indices into a dataset of batched indices. /// - /// + /// /// IDatasetV2 slice_batch_indices(Tensor indices) { diff --git a/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs b/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs index 8a66948b9..a3831bffa 100644 --- a/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs +++ b/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs @@ -10,7 +10,7 @@ public partial class Layer /// Wraps `call`, applying pre- and post-processing steps. /// /// - /// + /// /// /// public virtual Tensors Apply(Tensors inputs, Tensors states = null, bool? training = false, IOptionalArgs? 
optional_args = null) diff --git a/src/TensorFlowNET.Keras/Engine/Layer.FunctionalConstructionCall.cs b/src/TensorFlowNET.Keras/Engine/Layer.FunctionalConstructionCall.cs index 1d96e5811..e4023c3fd 100644 --- a/src/TensorFlowNET.Keras/Engine/Layer.FunctionalConstructionCall.cs +++ b/src/TensorFlowNET.Keras/Engine/Layer.FunctionalConstructionCall.cs @@ -1,7 +1,5 @@ using System; using Tensorflow.Keras.Utils; -using static Tensorflow.Binding; -using static Tensorflow.KerasApi; namespace Tensorflow.Keras.Engine { @@ -9,14 +7,6 @@ public partial class Layer { Tensors FunctionalConstructionCall(Tensors inputs) { - bool mask_arg_passed_by_framework = false; - bool training_arg_passed_by_framework = false; - Tensor training_value = null; - if (training_value == null) - { - training_arg_passed_by_framework = true; - } - if (base_layer_utils.needs_keras_history(inputs)) base_layer_utils.create_keras_history(inputs); diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index c4761f873..a74a77f18 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -15,7 +15,7 @@ namespace Tensorflow.Keras.Engine public partial class Model { /// - /// Returns the loss value & metrics values for the model in test mode. + /// Returns the loss value and metrics values for the model in test mode. /// /// /// diff --git a/src/TensorFlowNET.Keras/Engine/Model.Train.cs b/src/TensorFlowNET.Keras/Engine/Model.Train.cs index 48c16e181..ad3c70d2d 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Train.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Train.cs @@ -29,7 +29,9 @@ Dictionary train_step_multi_inputs_function(DataHandler data_hand /// /// The logic for one training step. /// - /// + /// + /// + /// /// Dictionary train_step(DataHandler data_handler, Tensors x, Tensors y) { diff --git a/src/TensorFlowNET.Keras/KerasInterface.cs b/src/TensorFlowNET.Keras/KerasInterface.cs index 159564aac..6bc381095 100644 --- a/src/TensorFlowNET.Keras/KerasInterface.cs +++ b/src/TensorFlowNET.Keras/KerasInterface.cs @@ -72,8 +72,8 @@ public Sequential Sequential(params ILayer[] layers) /// /// `Model` groups layers into an object with training and inference features. /// - /// - /// + /// + /// /// public IModel Model(Tensors inputs, Tensors outputs, string name = null) => new Functional(inputs, outputs, name: name); diff --git a/src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs b/src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs index 19b292727..970a938d2 100644 --- a/src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs +++ b/src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs @@ -1,24 +1,18 @@ using Tensorflow.Keras.Engine; using Tensorflow.Keras.ArgsDefinition; -using static Tensorflow.Binding; -using static Tensorflow.KerasApi; using System; using System.Collections.Generic; using System.Linq; using Tensorflow.Keras.Saving; using Tensorflow.Common.Types; -/// -/// Base class for attention layers that can be used in sequence DNN/CNN models. -///This file follows the terminology of https://arxiv.org/abs/1706.03762 Figure 2. -///Attention is formed by three tensors: Query, Key and Value. -/// - namespace Tensorflow.Keras.Layers { /// /// Base Attention class for Dense networks. + /// This file follows the terminology of https://arxiv.org/abs/1706.03762 Figure 2. + /// Attention is formed by three tensors: Query, Key and Value. 
/// This class is suitable for Dense or CNN networks, and not for RNN networks. /// Implementations of attention mechanisms should inherit from this class, and /// reuse the `apply_attention_scores()` method. diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index d20803375..213b53a82 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -183,9 +183,6 @@ public ILayer Conv2D(int filters, /// Boolean, whether the layer uses a bias vector. /// The name of the initializer for the kernel weights matrix (see keras.initializers). /// The name of the initializer for the bias vector (see keras.initializers). - /// The name of the regularizer function applied to the kernel weights matrix (see keras.regularizers). - /// The name of the regularizer function applied to the bias vector (see keras.regularizers). - /// The name of the regularizer function applied to the output of the layer (its "activation") (see keras.regularizers). /// A tensor of rank 4+ representing activation(conv2d(inputs, kernel) + bias). public ILayer Conv2D(int filters, Shape kernel_size = null, diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs index 312854388..7d5385e6f 100644 --- a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs @@ -2,7 +2,6 @@ using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; using Tensorflow.Common.Types; -using Tensorflow.Common.Types; namespace Tensorflow.Keras.Layers.Reshaping { diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs index 0ca62c391..6075547bb 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs @@ -242,10 +242,9 @@ object get_state_spec(Shape shape) /// /// /// - /// Binary tensor of shape [batch_size, timesteps] indicating whether a given timestep should be masked - /// /// List of initial state tensors to be passed to the first call of the cell - /// List of constant tensors to be passed to the cell at each timestep + /// + /// /// /// /// diff --git a/src/TensorFlowNET.Keras/Preprocessings/DatasetUtils.get_training_or_validation_split.cs b/src/TensorFlowNET.Keras/Preprocessings/DatasetUtils.get_training_or_validation_split.cs index 2f3d8f527..18ca404ef 100644 --- a/src/TensorFlowNET.Keras/Preprocessings/DatasetUtils.get_training_or_validation_split.cs +++ b/src/TensorFlowNET.Keras/Preprocessings/DatasetUtils.get_training_or_validation_split.cs @@ -6,7 +6,7 @@ namespace Tensorflow.Keras.Preprocessings public partial class DatasetUtils { /// - /// Potentially restict samples & labels to a training or validation split. + /// Potentially restict samples and labels to a training or validation split. /// /// /// diff --git a/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs b/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs index 1e869d666..fd1453d3c 100644 --- a/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs +++ b/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs @@ -693,7 +693,6 @@ private bool _try_build_layer(Layer obj, int node_id, KerasShapesWrapper build_i /// Infers input shape of layer from SavedModel functions. 
/// /// - /// /// private TensorSpec _infer_inputs(int layer_node_id) { diff --git a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj index ab667519e..c7fa7711c 100644 --- a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj +++ b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj @@ -7,7 +7,7 @@ enable Tensorflow.Keras AnyCPU;x64 - 0.11.1 + 0.11.2 Haiping Chen Keras for .NET Apache 2.0, Haiping Chen 2023 @@ -26,7 +26,8 @@ * Add Subtract layer * Text preprocessing * Preprocessing.timeseries_dataset_from_array -* Fixed memory leak for YOLOv3 model. +* Fixed memory leak for YOLOv3 model. +* Support RNN and LSTM models Keras for .NET Keras is an API designed for human beings, not machines. Keras follows best practices for reducing cognitive load: it offers consistent & simple APIs, it minimizes the number of user actions required for common use cases, and it provides clear & actionable error messages. @@ -36,10 +37,10 @@ Keras is an API designed for human beings, not machines. Keras follows best prac true packages Git - true + False Open.snk - 0.11.1.0 - 0.11.1.0 + 0.11.2.0 + 0.11.2.0 LICENSE Debug;Release;GPU @@ -70,6 +71,74 @@ Keras is an API designed for human beings, not machines. Keras follows best prac + + True + 1 + $(NoWarn),1573,1591,1712,8602,8603,8625,CS0612 + + + + True + 1 + $(NoWarn),1573,1591,1712,8602,8603,8625,CS0612 + + + + True + 1 + $(NoWarn),1573,1591,1712,8602,8603,8625,CS0612 + + + + True + 1 + $(NoWarn),1573,1591,1712,8602,8603,8625,CS0612 + + + + False + 1 + $(NoWarn),1573,1591,1712,8602,8603,8625,CS0612 + + + + False + 1 + $(NoWarn),1573,1591,1712,8602,8603,8625,CS0612 + + + + False + 1 + $(NoWarn),1573,1591,1712,8602,8603,8625,CS0612 + + + + False + 1 + $(NoWarn),1573,1591,1712,8602,8603,8625,CS0612 + + + + 1 + $(NoWarn),1573,1591,1712,8602,8603,8625,CS0612 + + + + 1 + $(NoWarn),1573,1591,1712,8602,8603,8625,CS0612 + + + + 1 + $(NoWarn),1573,1591,1712,8602,8603,8625,CS0612 + + + + 1 + $(NoWarn),1573,1591,1712,8602,8603,8625,CS0612 + + From f56811d080f8891a396831c39073b687e1733302 Mon Sep 17 00:00:00 2001 From: dogvane Date: Tue, 11 Jul 2023 02:31:32 +0800 Subject: [PATCH 084/182] fix flip_left_right run bug --- src/TensorFlowNET.Core/Operations/image_ops_impl.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs index 126df9e42..0ced407a8 100644 --- a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs +++ b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs @@ -208,7 +208,7 @@ internal static Tensor _random_flip(Tensor image, int flip_index, int seed, stri } public static Tensor flip_left_right(Tensor image) - => _flip(image, 1, "flip_left_right"); + => _flip(image, 0, "flip_left_right"); public static Tensor flip_up_down(Tensor image) => _flip(image, 1, "flip_up_down"); @@ -226,7 +226,7 @@ internal static Tensor _flip(Tensor image, int flip_index, string scope_name) } else if (shape.ndim == 4) { - return gen_array_ops.reverse(image, ops.convert_to_tensor(new[] { flip_index + 1 })); + return gen_array_ops.reverse_v2(image, ops.convert_to_tensor(new[] { (flip_index + 1) % 2 })); } else { From 70f873eccef99e4ca6af39a8ac798cc36292ace2 Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Mon, 10 Jul 2023 15:02:39 -0500 Subject: [PATCH 085/182] Initially adding KerasTensor. 
#1142 --- src/TensorFlowNET.Core/GlobalUsing.cs | 4 +- .../Keras/Layers/ILayersApi.cs | 3 +- src/TensorFlowNET.Core/Tensors/KerasTensor.cs | 40 +++++++++++++++++++ src/TensorFlowNET.Keras/BackendImpl.cs | 2 +- src/TensorFlowNET.Keras/GlobalUsing.cs | 3 +- src/TensorFlowNET.Keras/Layers/LayersApi.cs | 2 +- 6 files changed, 49 insertions(+), 5 deletions(-) create mode 100644 src/TensorFlowNET.Core/Tensors/KerasTensor.cs diff --git a/src/TensorFlowNET.Core/GlobalUsing.cs b/src/TensorFlowNET.Core/GlobalUsing.cs index 2fd5b437b..209bc291f 100644 --- a/src/TensorFlowNET.Core/GlobalUsing.cs +++ b/src/TensorFlowNET.Core/GlobalUsing.cs @@ -3,4 +3,6 @@ global using System.Text; global using System.Collections; global using System.Data; -global using System.Linq; \ No newline at end of file +global using System.Linq; +global using Tensorflow.Keras.Engine; +global using Tensorflow.Framework.Models; \ No newline at end of file diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs index 9bc99701d..b48cd5535 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs @@ -1,5 +1,6 @@ using System; using Tensorflow.Framework.Models; +using Tensorflow.Keras.Engine; using Tensorflow.Keras.Layers.Rnn; using Tensorflow.NumPy; using static Google.Protobuf.Reflection.FieldDescriptorProto.Types; @@ -135,7 +136,7 @@ public ILayer EinsumDense(string equation, public ILayer GlobalMaxPooling1D(string data_format = "channels_last"); public ILayer GlobalMaxPooling2D(string data_format = "channels_last"); - public Tensors Input(Shape shape = null, + public KerasTensor Input(Shape shape = null, int batch_size = -1, string name = null, TF_DataType dtype = TF_DataType.DtInvalid, diff --git a/src/TensorFlowNET.Core/Tensors/KerasTensor.cs b/src/TensorFlowNET.Core/Tensors/KerasTensor.cs new file mode 100644 index 000000000..1034dcc8f --- /dev/null +++ b/src/TensorFlowNET.Core/Tensors/KerasTensor.cs @@ -0,0 +1,40 @@ +namespace Tensorflow.Keras.Engine; + +/// +/// A representation of a Keras in/output during Functional API construction. 
+/// +public class KerasTensor +{ + private Tensor _tensor; + public void SetTensor(Tensors tensor) + => _tensor = tensor; + + private TensorSpec _type_spec; + private string _name; + + public KerasTensor(TensorSpec type_spec, string name = null) + { + _type_spec = type_spec; + _name = name; + } + + public static KerasTensor from_tensor(Tensor tensor) + { + var type_spec = tensor.ToTensorSpec(); + var kt = new KerasTensor(type_spec, name: tensor.name); + kt.SetTensor(tensor); + return kt; + } + + public static implicit operator Tensors(KerasTensor kt) + => kt._tensor; + + public static implicit operator Tensor(KerasTensor kt) + => kt._tensor; + + public static implicit operator KerasTensor(Tensor tensor) + => from_tensor(tensor); + + public static implicit operator KerasTensor(Tensors tensors) + => from_tensor(tensors.First()); +} diff --git a/src/TensorFlowNET.Keras/BackendImpl.cs b/src/TensorFlowNET.Keras/BackendImpl.cs index 364800ae5..574cf5990 100644 --- a/src/TensorFlowNET.Keras/BackendImpl.cs +++ b/src/TensorFlowNET.Keras/BackendImpl.cs @@ -76,7 +76,7 @@ public void track_variable(IVariableV1 v) _GRAPH_VARIABLES[graph.graph_key] = v; } - public Tensor placeholder(Shape shape = null, + public KerasTensor placeholder(Shape shape = null, int ndim = -1, TF_DataType dtype = TF_DataType.DtInvalid, bool sparse = false, diff --git a/src/TensorFlowNET.Keras/GlobalUsing.cs b/src/TensorFlowNET.Keras/GlobalUsing.cs index bc0798ede..85cd9194c 100644 --- a/src/TensorFlowNET.Keras/GlobalUsing.cs +++ b/src/TensorFlowNET.Keras/GlobalUsing.cs @@ -4,4 +4,5 @@ global using System.Linq; global using static Tensorflow.Binding; global using static Tensorflow.KerasApi; -global using Tensorflow.NumPy; \ No newline at end of file +global using Tensorflow.NumPy; +global using Tensorflow.Keras.Engine; \ No newline at end of file diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index 213b53a82..5968461d0 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -466,7 +466,7 @@ public ILayer Flatten(string data_format = null) /// In this case, values of 'None' in the 'shape' argument represent ragged dimensions. For more information about RaggedTensors, see this guide. /// /// A tensor. 
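// --- Usage sketch (illustrative, not part of the patch): with Input returning a
// KerasTensor (signature change just below), functional-style construction reads
// the same as before, because KerasTensor defines implicit conversions to and
// from Tensor/Tensors. The specific layers used here are assumptions for the
// example only.
using static Tensorflow.KerasApi;
using Tensorflow.Keras.Engine;

KerasTensor inputs = keras.layers.Input(shape: (28, 28, 1));
var x = keras.layers.Flatten().Apply(inputs);   // implicit KerasTensor -> Tensors
var outputs = keras.layers.Dense(10).Apply(x);
var model = keras.Model(inputs, outputs);       // implicit KerasTensor -> Tensors
model.summary();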
- public Tensors Input(Shape shape = null, + public KerasTensor Input(Shape shape = null, int batch_size = -1, string name = null, TF_DataType dtype = TF_DataType.DtInvalid, From ed1a8d2edfbad3e47efa48af5e1dbb4c22a20f2e Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Mon, 10 Jul 2023 23:00:26 -0500 Subject: [PATCH 086/182] Add shape and dtype to KerasTensor --- .../Operations/array_ops.cs | 49 ++++++++++++------- src/TensorFlowNET.Core/Tensors/KerasTensor.cs | 27 +++++++--- .../Tensors/Tensor.Index.cs | 2 +- 3 files changed, 52 insertions(+), 26 deletions(-) diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index 5237ec446..02bf0e868 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -603,7 +603,17 @@ public static Tensor shape_internal(Tensor input, string name = null, bool optim } } - return gen_array_ops.shape(input, name: name, out_type: out_type); + return tf.Context.ExecuteOp("Shape", name, new ExecuteOpArgs(input) + { + GetGradientAttrs = (op) => new + { + T = op.get_attr("T"), + out_type = op.get_attr("out_type") + } + }.SetAttributes(new + { + out_type + })).First(); }); } @@ -703,23 +713,26 @@ public static Tensor strided_slice(Tensor input_, Tensor begin, Tensor end, int new_axis_mask = 0, int shrink_axis_mask = 0, string name = null) - { - var op = gen_array_ops.strided_slice( - input: input_, - begin: begin, - end: end, - strides: strides, - begin_mask: begin_mask, - end_mask: end_mask, - ellipsis_mask: ellipsis_mask, - new_axis_mask: new_axis_mask, - shrink_axis_mask: shrink_axis_mask, - name: name); - - string parent_name = name; - - return op; - } + => tf.Context.ExecuteOp("StridedSlice", name, new ExecuteOpArgs(input_, begin, end, strides) + { + GetGradientAttrs = (op) => new + { + T = op.get_attr("T"), + Index = op.get_attr("Index"), + begin_mask = op.get_attr("begin_mask"), + end_mask = op.get_attr("end_mask"), + ellipsis_mask = op.get_attr("ellipsis_mask"), + new_axis_mask = op.get_attr("new_axis_mask"), + shrink_axis_mask = op.get_attr("shrink_axis_mask") + } + }.SetAttributes(new + { + begin_mask, + end_mask, + ellipsis_mask, + new_axis_mask, + shrink_axis_mask + })); /// /// Returns the gradient of `StridedSlice`. 
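// --- Illustrative sketch (not part of the patch): tensor indexing ends up in the
// array_ops.strided_slice shown above, which now dispatches through
// tf.Context.ExecuteOp and records the begin/end/ellipsis/new_axis/shrink_axis
// masks as op attributes, so the StridedSlice gradient can read them back.
using static Tensorflow.Binding;
using Tensorflow;

var x = tf.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } });
var firstRow = x[new Slice(0, 1)];   // packs begin/end/strides, calls strided_slice
print(firstRow.numpy());             // expected: [[1, 2, 3]]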
diff --git a/src/TensorFlowNET.Core/Tensors/KerasTensor.cs b/src/TensorFlowNET.Core/Tensors/KerasTensor.cs index 1034dcc8f..3204b4ac0 100644 --- a/src/TensorFlowNET.Core/Tensors/KerasTensor.cs +++ b/src/TensorFlowNET.Core/Tensors/KerasTensor.cs @@ -5,12 +5,17 @@ /// public class KerasTensor { - private Tensor _tensor; - public void SetTensor(Tensors tensor) - => _tensor = tensor; + private Tensors _inferred_value; + public Tensors inferred_value + { + get => _inferred_value; + set => _inferred_value = value; + } - private TensorSpec _type_spec; private string _name; + private TensorSpec _type_spec; + public Shape shape => _type_spec.shape; + public TF_DataType dtype => _type_spec.dtype; public KerasTensor(TensorSpec type_spec, string name = null) { @@ -22,15 +27,23 @@ public static KerasTensor from_tensor(Tensor tensor) { var type_spec = tensor.ToTensorSpec(); var kt = new KerasTensor(type_spec, name: tensor.name); - kt.SetTensor(tensor); + kt.inferred_value = tensor; return kt; } + public override string ToString() + => _inferred_value.Length switch + { + > 1 => "[" + string.Join(", ", _inferred_value.Select(x => $"")) + "]", + 1 => $"", + _ => _inferred_value.ToString(), + }; + public static implicit operator Tensors(KerasTensor kt) - => kt._tensor; + => kt._inferred_value; public static implicit operator Tensor(KerasTensor kt) - => kt._tensor; + => kt._inferred_value; public static implicit operator KerasTensor(Tensor tensor) => from_tensor(tensor); diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Index.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Index.cs index 217712fef..51062cf3b 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Index.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Index.cs @@ -42,7 +42,7 @@ public Tensor this[params Slice[] slices] array_ops.stack(args.End), array_ops.stack(args.Strides)); - return gen_array_ops.strided_slice( + return array_ops.strided_slice( this, packed_begin, packed_end, From b27ccca84fe68394e4ffbf4babd16d0d2e05674e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CWanglongzhi2001=E2=80=9D?= <“583087864@qq.com”> Date: Tue, 11 Jul 2023 22:40:30 +0800 Subject: [PATCH 087/182] fix:fix the bug of load LSTM model --- .../Keras/ArgsDefinition/Rnn/GRUCellArgs.cs | 2 +- .../Keras/ArgsDefinition/Rnn/LSTMArgs.cs | 2 +- .../Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs | 2 +- .../Keras/ArgsDefinition/Rnn/RNNArgs.cs | 12 ++++++-- .../ArgsDefinition/Rnn/RnnOptionalArgs.cs | 2 +- .../Keras/ArgsDefinition/Rnn/SimpleRNNArgs.cs | 2 +- .../ArgsDefinition/Rnn/SimpleRNNCellArgs.cs | 2 +- .../ArgsDefinition/Rnn/StackedRNNCellsArgs.cs | 4 +-- .../Keras/Layers/ILayersApi.cs | 2 +- .../Keras/Layers/Rnn/IRnnCell.cs | 2 +- .../Keras/Layers/Rnn/IStackedRnnCells.cs | 2 +- .../Operations/NnOps/RNNCell.cs | 3 +- src/TensorFlowNET.Core/ops.cs | 4 ++- src/TensorFlowNET.Keras/Layers/LayersApi.cs | 3 +- .../Layers/Rnn/DropoutRNNCellMixin.cs | 2 +- src/TensorFlowNET.Keras/Layers/Rnn/GRUCell.cs | 3 +- src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs | 4 +-- .../Layers/Rnn/LSTMCell.cs | 4 +-- src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs | 4 +-- src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs | 2 +- .../Layers/Rnn/SimpleRNN.cs | 4 +-- .../Layers/Rnn/SimpleRNNCell.cs | 4 +-- .../Layers/Rnn/StackedRNNCells.cs | 4 +-- .../Saving/KerasObjectLoader.cs | 1 - .../SavedModel/serialized_attributes.cs | 2 +- src/TensorFlowNET.Keras/Utils/RnnUtils.cs | 2 +- .../Layers/Rnn.Test.cs | 2 +- .../Model/ModelLoadTest.cs | 29 ++++++++++++++++++- 28 files changed, 71 insertions(+), 40 deletions(-) diff --git 
a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUCellArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUCellArgs.cs index 75d5d0218..624756afe 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUCellArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUCellArgs.cs @@ -3,7 +3,7 @@ using System.Collections.Generic; using System.Text; -namespace Tensorflow.Keras.ArgsDefinition.Rnn +namespace Tensorflow.Keras.ArgsDefinition { public class GRUCellArgs : AutoSerializeLayerArgs { diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs index db76fda06..d816b0ff7 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs @@ -1,4 +1,4 @@ -namespace Tensorflow.Keras.ArgsDefinition.Rnn +namespace Tensorflow.Keras.ArgsDefinition { public class LSTMArgs : RNNArgs { diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs index 786236e4d..f45032312 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs @@ -1,7 +1,7 @@ using Newtonsoft.Json; using static Tensorflow.Binding; -namespace Tensorflow.Keras.ArgsDefinition.Rnn +namespace Tensorflow.Keras.ArgsDefinition { // TODO: complete the implementation public class LSTMCellArgs : AutoSerializeLayerArgs diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs index 2d7fb001a..b84d30d3d 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs @@ -1,8 +1,8 @@ using Newtonsoft.Json; using System.Collections.Generic; -using Tensorflow.Keras.Layers.Rnn; +using Tensorflow.Keras.Layers; -namespace Tensorflow.Keras.ArgsDefinition.Rnn +namespace Tensorflow.Keras.ArgsDefinition { // TODO(Rinne): add regularizers. public class RNNArgs : AutoSerializeLayerArgs @@ -23,16 +23,22 @@ public class RNNArgs : AutoSerializeLayerArgs public int? InputDim { get; set; } public int? InputLength { get; set; } // TODO: Add `num_constants` and `zero_output_for_mask`. 
- + [JsonProperty("units")] public int Units { get; set; } + [JsonProperty("activation")] public Activation Activation { get; set; } + [JsonProperty("recurrent_activation")] public Activation RecurrentActivation { get; set; } + [JsonProperty("use_bias")] public bool UseBias { get; set; } = true; public IInitializer KernelInitializer { get; set; } public IInitializer RecurrentInitializer { get; set; } public IInitializer BiasInitializer { get; set; } + [JsonProperty("dropout")] public float Dropout { get; set; } = .0f; + [JsonProperty("zero_output_for_mask")] public bool ZeroOutputForMask { get; set; } = false; + [JsonProperty("recurrent_dropout")] public float RecurrentDropout { get; set; } = .0f; } } diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RnnOptionalArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RnnOptionalArgs.cs index 64b500bba..a6520589d 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RnnOptionalArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RnnOptionalArgs.cs @@ -3,7 +3,7 @@ using System.Text; using Tensorflow.Common.Types; -namespace Tensorflow.Keras.ArgsDefinition.Rnn +namespace Tensorflow.Keras.ArgsDefinition { public class RnnOptionalArgs: IOptionalArgs { diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNArgs.cs index fcfd694d1..e45ef79d0 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNArgs.cs @@ -1,4 +1,4 @@ -namespace Tensorflow.Keras.ArgsDefinition.Rnn +namespace Tensorflow.Keras.ArgsDefinition { public class SimpleRNNArgs : RNNArgs { diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs index d21d61905..b84ea21b3 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs @@ -1,6 +1,6 @@ using Newtonsoft.Json; -namespace Tensorflow.Keras.ArgsDefinition.Rnn +namespace Tensorflow.Keras.ArgsDefinition { public class SimpleRNNCellArgs: AutoSerializeLayerArgs { diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs index 50a6127df..2600f14ee 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs @@ -1,7 +1,7 @@ using System.Collections.Generic; -using Tensorflow.Keras.Layers.Rnn; +using Tensorflow.Keras.Layers; -namespace Tensorflow.Keras.ArgsDefinition.Rnn +namespace Tensorflow.Keras.ArgsDefinition { public class StackedRNNCellsArgs : LayerArgs { diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs index b48cd5535..1670f9d1d 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs @@ -1,7 +1,7 @@ using System; using Tensorflow.Framework.Models; using Tensorflow.Keras.Engine; -using Tensorflow.Keras.Layers.Rnn; +using Tensorflow.Keras.Layers; using Tensorflow.NumPy; using static Google.Protobuf.Reflection.FieldDescriptorProto.Types; diff --git a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs index 8d6fbc976..43df75b17 100644 --- 
a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs @@ -3,7 +3,7 @@ using System.Text; using Tensorflow.Common.Types; -namespace Tensorflow.Keras.Layers.Rnn +namespace Tensorflow.Keras.Layers { public interface IRnnCell: ILayer { diff --git a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IStackedRnnCells.cs b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IStackedRnnCells.cs index e73244a51..8cf6150d3 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IStackedRnnCells.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IStackedRnnCells.cs @@ -2,7 +2,7 @@ using System.Collections.Generic; using System.Text; -namespace Tensorflow.Keras.Layers.Rnn +namespace Tensorflow.Keras.Layers { public interface IStackedRnnCells : IRnnCell { diff --git a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs index 4e99731f9..9905d39c8 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs @@ -19,9 +19,8 @@ limitations under the License. using Tensorflow.Common.Types; using Tensorflow.Keras; using Tensorflow.Keras.ArgsDefinition; -using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; -using Tensorflow.Keras.Layers.Rnn; +using Tensorflow.Keras.Layers; using Tensorflow.Keras.Saving; using Tensorflow.NumPy; using Tensorflow.Operations; diff --git a/src/TensorFlowNET.Core/ops.cs b/src/TensorFlowNET.Core/ops.cs index 2dc463296..c624c9901 100644 --- a/src/TensorFlowNET.Core/ops.cs +++ b/src/TensorFlowNET.Core/ops.cs @@ -571,7 +571,9 @@ public static bool executing_eagerly_outside_functions() if (tf.Context.executing_eagerly()) return true; else - throw new NotImplementedException(""); + // TODO(Wanglongzhi2001), implement the false case + return true; + //throw new NotImplementedException(""); } public static bool inside_function() diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index 5968461d0..cb85bbba1 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -2,9 +2,8 @@ using Tensorflow.Framework.Models; using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.ArgsDefinition.Core; -using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; -using Tensorflow.Keras.Layers.Rnn; +using Tensorflow.Keras.Layers; using Tensorflow.NumPy; using static Tensorflow.Binding; using static Tensorflow.KerasApi; diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs index 75feb8ea2..27c13f349 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs @@ -6,7 +6,7 @@ using Tensorflow.Keras.Engine; using Tensorflow.Keras.Utils; -namespace Tensorflow.Keras.Layers.Rnn +namespace Tensorflow.Keras.Layers { public abstract class DropoutRNNCellMixin: Layer, IRnnCell { diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/GRUCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/GRUCell.cs index 02fe54f49..2b9c01e31 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/GRUCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/GRUCell.cs @@ -3,12 +3,11 @@ using System.Diagnostics; using System.Text; using Tensorflow.Keras.ArgsDefinition; -using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Common.Extensions; using Tensorflow.Common.Types; using Tensorflow.Keras.Saving; -namespace 
Tensorflow.Keras.Layers.Rnn +namespace Tensorflow.Keras.Layers { /// /// Cell class for the GRU layer. diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs b/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs index 025465fd6..b5d583248 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs @@ -1,10 +1,10 @@ using System.Linq; -using Tensorflow.Keras.ArgsDefinition.Rnn; +using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Common.Types; using Tensorflow.Common.Extensions; -namespace Tensorflow.Keras.Layers.Rnn +namespace Tensorflow.Keras.Layers { /// /// Long Short-Term Memory layer - Hochreiter 1997. diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs index 284a2b778..e4fc6dd22 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs @@ -3,12 +3,12 @@ using System.Diagnostics; using Tensorflow.Common.Extensions; using Tensorflow.Common.Types; -using Tensorflow.Keras.ArgsDefinition.Rnn; +using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; using Tensorflow.Keras.Utils; -namespace Tensorflow.Keras.Layers.Rnn +namespace Tensorflow.Keras.Layers { /// /// Cell class for the LSTM layer. diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs index 6075547bb..0e81d20e3 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs @@ -3,7 +3,6 @@ using System.Collections.Generic; using System.Reflection; using Tensorflow.Keras.ArgsDefinition; -using Tensorflow.Keras.ArgsDefinition.Rnn; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; using Tensorflow.Util; @@ -14,7 +13,7 @@ using System.Runtime.CompilerServices; // from tensorflow.python.distribute import distribution_strategy_context as ds_context; -namespace Tensorflow.Keras.Layers.Rnn +namespace Tensorflow.Keras.Layers { /// /// Base class for recurrent layers. 
@@ -185,6 +184,7 @@ private Tensors compute_mask(Tensors inputs, Tensors mask) public override void build(KerasShapesWrapper input_shape) { + _buildInputShape = input_shape; input_shape = new KerasShapesWrapper(input_shape.Shapes[0]); InputSpec get_input_spec(Shape shape) diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs index 018b17780..1419da4b2 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs @@ -4,7 +4,7 @@ using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; -namespace Tensorflow.Keras.Layers.Rnn +namespace Tensorflow.Keras.Layers { public abstract class RnnBase: Layer { diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs index a22f31c7d..9c199eb43 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs @@ -1,11 +1,11 @@ using System.Data; -using Tensorflow.Keras.ArgsDefinition.Rnn; +using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Saving; using Tensorflow.Operations.Activation; using static HDF.PInvoke.H5Z; using static Tensorflow.ApiDef.Types; -namespace Tensorflow.Keras.Layers.Rnn +namespace Tensorflow.Keras.Layers { public class SimpleRNN : RNN { diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs index c77f77790..e74b56925 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs @@ -1,7 +1,7 @@ using System; using System.Collections.Generic; using System.Text; -using Tensorflow.Keras.ArgsDefinition.Rnn; +using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; using Tensorflow.Common.Types; @@ -9,7 +9,7 @@ using Tensorflow.Keras.Utils; using Tensorflow.Graphs; -namespace Tensorflow.Keras.Layers.Rnn +namespace Tensorflow.Keras.Layers { /// /// Cell class for SimpleRNN. 
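// --- Usage sketch (assumed call site, not part of the patch): after the namespace
// consolidation above, RNN layers and their argument classes resolve from
// Tensorflow.Keras.Layers / Tensorflow.Keras.ArgsDefinition instead of the old
// *.Rnn sub-namespaces; the calls themselves are unchanged.
using static Tensorflow.Binding;
using static Tensorflow.KerasApi;
using Tensorflow.Keras.Layers;                  // LSTM and friends now live here

var lstm = keras.layers.LSTM(32);               // same factory call as before the move
var outputs = lstm.Apply(tf.ones((8, 5, 3)));   // (batch, timesteps, features)
print(outputs.shape);                           // expected: (8, 32)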
diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs index 8799bfb23..ece2bc5bf 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs @@ -3,12 +3,12 @@ using System.Linq; using Tensorflow.Common.Extensions; using Tensorflow.Common.Types; -using Tensorflow.Keras.ArgsDefinition.Rnn; +using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; using Tensorflow.Keras.Utils; -namespace Tensorflow.Keras.Layers.Rnn +namespace Tensorflow.Keras.Layers { public class StackedRNNCells : Layer, IRnnCell { diff --git a/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs b/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs index fd1453d3c..0bd816ccb 100644 --- a/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs +++ b/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs @@ -13,7 +13,6 @@ using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Layers; -using Tensorflow.Keras.Layers.Rnn; using Tensorflow.Keras.Losses; using Tensorflow.Keras.Metrics; using Tensorflow.Keras.Saving.SavedModel; diff --git a/src/TensorFlowNET.Keras/Saving/SavedModel/serialized_attributes.cs b/src/TensorFlowNET.Keras/Saving/SavedModel/serialized_attributes.cs index 0ec5d1a8c..325d3327a 100644 --- a/src/TensorFlowNET.Keras/Saving/SavedModel/serialized_attributes.cs +++ b/src/TensorFlowNET.Keras/Saving/SavedModel/serialized_attributes.cs @@ -3,7 +3,7 @@ using System.Linq; using System.Text; using Tensorflow.Keras.Engine; -using Tensorflow.Keras.Layers.Rnn; +using Tensorflow.Keras.Layers; using Tensorflow.Keras.Metrics; using Tensorflow.Train; diff --git a/src/TensorFlowNET.Keras/Utils/RnnUtils.cs b/src/TensorFlowNET.Keras/Utils/RnnUtils.cs index e8700c1f2..1e9f6d845 100644 --- a/src/TensorFlowNET.Keras/Utils/RnnUtils.cs +++ b/src/TensorFlowNET.Keras/Utils/RnnUtils.cs @@ -3,7 +3,7 @@ using System.Diagnostics; using System.Text; using Tensorflow.Common.Types; -using Tensorflow.Keras.Layers.Rnn; +using Tensorflow.Keras.Layers; using Tensorflow.Common.Extensions; namespace Tensorflow.Keras.Utils diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs index becdbcd60..5f7bd574e 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs @@ -6,7 +6,7 @@ using System.Threading.Tasks; using Tensorflow.Common.Types; using Tensorflow.Keras.Engine; -using Tensorflow.Keras.Layers.Rnn; +using Tensorflow.Keras.Layers; using Tensorflow.Keras.Saving; using Tensorflow.NumPy; using Tensorflow.Train; diff --git a/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs b/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs index 10db2bd11..382941d9a 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs @@ -1,5 +1,7 @@ -using Microsoft.VisualStudio.TestTools.UnitTesting; +using Microsoft.VisualStudio.TestPlatform.Utilities; +using Microsoft.VisualStudio.TestTools.UnitTesting; using System.Linq; +using Tensorflow.Keras.Engine; using Tensorflow.Keras.Optimizers; using Tensorflow.Keras.UnitTest.Helpers; using Tensorflow.NumPy; @@ -79,6 +81,31 @@ public void ModelWithSelfDefinedModule() model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size, num_epochs); } + [TestMethod] + public void LSTMLoad() + { + var inputs = 
np.random.randn(10, 5, 3); + var outputs = np.random.randn(10, 1); + var model = keras.Sequential(); + model.add(keras.Input(shape: (5, 3))); + var lstm = keras.layers.LSTM(32); + + model.add(lstm); + + model.add(keras.layers.Dense(1, keras.activations.Sigmoid)); + + model.compile(optimizer: keras.optimizers.Adam(), + loss: keras.losses.MeanSquaredError(), + new[] { "accuracy" }); + + var result = model.fit(inputs.numpy(), outputs.numpy(), batch_size: 10, epochs: 3, workers: 16, use_multiprocessing: true); + + model.save("LSTM_Random"); + + var model_loaded = keras.models.load_model("LSTM_Random"); + model_loaded.summary(); + } + [Ignore] [TestMethod] public void VGG19() From 9c949e336f6c9fedd6a6ae5eace581084d16a8b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CWanglongzhi2001=E2=80=9D?= <“583087864@qq.com”> Date: Tue, 11 Jul 2023 23:44:42 +0800 Subject: [PATCH 088/182] refactor: refactor LSTMLoad test --- .../lstm_from_sequential/fingerprint.pb | 1 + .../lstm_from_sequential/keras_metadata.pb | 7 +++++ .../lstm_from_sequential/saved_model.pb | Bin 0 -> 755111 bytes .../variables/variables.data-00000-of-00001 | Bin 0 -> 61038 bytes .../variables/variables.index | Bin 0 -> 1373 bytes .../Model/ModelLoadTest.cs | 26 ++++-------------- .../Tensorflow.Keras.UnitTest.csproj | 16 +++++++++++ 7 files changed, 30 insertions(+), 20 deletions(-) create mode 100644 test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/fingerprint.pb create mode 100644 test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/keras_metadata.pb create mode 100644 test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/saved_model.pb create mode 100644 test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/variables/variables.data-00000-of-00001 create mode 100644 test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/variables/variables.index diff --git a/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/fingerprint.pb b/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/fingerprint.pb new file mode 100644 index 000000000..f6ea8da23 --- /dev/null +++ b/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/fingerprint.pb @@ -0,0 +1 @@ +沦Ʉ%̟땐͉ Σ(Ћ܇}2 \ No newline at end of file diff --git a/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/keras_metadata.pb b/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/keras_metadata.pb new file mode 100644 index 000000000..5fe8f1a65 --- /dev/null +++ b/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/keras_metadata.pb @@ -0,0 +1,7 @@ + +&root"_tf_keras_sequential*&{"name": "sequential", "trainable": true, "expects_training_arg": true, "dtype": "float32", "batch_input_shape": null, "must_restore_from_config": false, "preserve_input_structure_in_config": false, "autocast": false, "class_name": "Sequential", "config": {"name": "sequential", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": {"class_name": "__tuple__", "items": [null, 5, 3]}, "dtype": "float32", "sparse": false, "ragged": false, "name": "input_1"}}, {"class_name": "LSTM", "config": {"name": "lstm", "trainable": true, "dtype": "float32", "return_sequences": false, "return_state": false, "go_backwards": false, "stateful": false, "unroll": false, "time_major": false, "units": 32, "activation": "tanh", "recurrent_activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 1}, "recurrent_initializer": {"class_name": 
"Orthogonal", "config": {"gain": 1.0, "seed": null}, "shared_object_id": 2}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 3}, "unit_forget_bias": true, "kernel_regularizer": null, "recurrent_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "recurrent_constraint": null, "bias_constraint": null, "dropout": 0.0, "recurrent_dropout": 0.0, "implementation": 2}}, {"class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 1, "activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "shared_object_id": 9, "input_spec": [{"class_name": "InputSpec", "config": {"dtype": null, "shape": {"class_name": "__tuple__", "items": [null, 5, 3]}, "ndim": 3, "max_ndim": null, "min_ndim": null, "axes": {}}}], "build_input_shape": {"class_name": "TensorShape", "items": [null, 5, 3]}, "is_graph_network": true, "full_save_spec": {"class_name": "__tuple__", "items": [[{"class_name": "TypeSpec", "type_spec": "tf.TensorSpec", "serialized": [{"class_name": "TensorShape", "items": [null, 5, 3]}, "float32", "input_1"]}], {}]}, "save_spec": {"class_name": "TypeSpec", "type_spec": "tf.TensorSpec", "serialized": [{"class_name": "TensorShape", "items": [null, 5, 3]}, "float32", "input_1"]}, "keras_version": "2.12.0", "backend": "tensorflow", "model_config": {"class_name": "Sequential", "config": {"name": "sequential", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": {"class_name": "__tuple__", "items": [null, 5, 3]}, "dtype": "float32", "sparse": false, "ragged": false, "name": "input_1"}, "shared_object_id": 0}, {"class_name": "LSTM", "config": {"name": "lstm", "trainable": true, "dtype": "float32", "return_sequences": false, "return_state": false, "go_backwards": false, "stateful": false, "unroll": false, "time_major": false, "units": 32, "activation": "tanh", "recurrent_activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 1}, "recurrent_initializer": {"class_name": "Orthogonal", "config": {"gain": 1.0, "seed": null}, "shared_object_id": 2}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 3}, "unit_forget_bias": true, "kernel_regularizer": null, "recurrent_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "recurrent_constraint": null, "bias_constraint": null, "dropout": 0.0, "recurrent_dropout": 0.0, "implementation": 2}, "shared_object_id": 5}, {"class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 1, "activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 6}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 7}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "shared_object_id": 8}]}}, "training_config": {"loss": "binary_crossentropy", "metrics": [[{"class_name": "MeanMetricWrapper", "config": {"name": "accuracy", "dtype": "float32", "fn": "binary_accuracy"}, "shared_object_id": 11}]], 
"weighted_metrics": null, "loss_weights": null, "optimizer_config": {"class_name": "Custom>Adam", "config": {"name": "Adam", "weight_decay": null, "clipnorm": null, "global_clipnorm": null, "clipvalue": null, "use_ema": false, "ema_momentum": 0.99, "ema_overwrite_frequency": null, "jit_compile": false, "is_legacy_optimizer": false, "learning_rate": 0.0010000000474974513, "beta_1": 0.9, "beta_2": 0.999, "epsilon": 1e-07, "amsgrad": false}}}}2 + root.layer_with_weights-0"_tf_keras_rnn_layer* {"name": "lstm", "trainable": true, "expects_training_arg": true, "dtype": "float32", "batch_input_shape": null, "stateful": false, "must_restore_from_config": false, "preserve_input_structure_in_config": false, "autocast": true, "class_name": "LSTM", "config": {"name": "lstm", "trainable": true, "dtype": "float32", "return_sequences": false, "return_state": false, "go_backwards": false, "stateful": false, "unroll": false, "time_major": false, "units": 32, "activation": "tanh", "recurrent_activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 1}, "recurrent_initializer": {"class_name": "Orthogonal", "config": {"gain": 1.0, "seed": null}, "shared_object_id": 2}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 3}, "unit_forget_bias": true, "kernel_regularizer": null, "recurrent_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "recurrent_constraint": null, "bias_constraint": null, "dropout": 0.0, "recurrent_dropout": 0.0, "implementation": 2}, "shared_object_id": 5, "input_spec": [{"class_name": "InputSpec", "config": {"dtype": null, "shape": {"class_name": "__tuple__", "items": [null, null, 3]}, "ndim": 3, "max_ndim": null, "min_ndim": null, "axes": {}}, "shared_object_id": 12}], "build_input_shape": {"class_name": "TensorShape", "items": [null, 5, 3]}}2 +root.layer_with_weights-1"_tf_keras_layer*{"name": "dense", "trainable": true, "expects_training_arg": false, "dtype": "float32", "batch_input_shape": null, "stateful": false, "must_restore_from_config": false, "preserve_input_structure_in_config": false, "autocast": true, "class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 1, "activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 6}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 7}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "shared_object_id": 8, "input_spec": {"class_name": "InputSpec", "config": {"dtype": null, "shape": null, "ndim": null, "max_ndim": null, "min_ndim": 2, "axes": {"-1": 32}}, "shared_object_id": 13}, "build_input_shape": {"class_name": "TensorShape", "items": [null, 32]}}2 +root.layer_with_weights-0.cell"_tf_keras_layer*{"name": "lstm_cell", "trainable": true, "expects_training_arg": true, "dtype": "float32", "batch_input_shape": null, "stateful": false, "must_restore_from_config": false, "preserve_input_structure_in_config": false, "autocast": true, "class_name": "LSTMCell", "config": {"name": "lstm_cell", "trainable": true, "dtype": "float32", "units": 32, "activation": "tanh", "recurrent_activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 1}, "recurrent_initializer": 
{"class_name": "Orthogonal", "config": {"gain": 1.0, "seed": null}, "shared_object_id": 2}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 3}, "unit_forget_bias": true, "kernel_regularizer": null, "recurrent_regularizer": null, "bias_regularizer": null, "kernel_constraint": null, "recurrent_constraint": null, "bias_constraint": null, "dropout": 0.0, "recurrent_dropout": 0.0, "implementation": 2}, "shared_object_id": 4, "build_input_shape": {"class_name": "__tuple__", "items": [null, 3]}}2 +Rroot.keras_api.metrics.0"_tf_keras_metric*{"class_name": "Mean", "name": "loss", "dtype": "float32", "config": {"name": "loss", "dtype": "float32"}, "shared_object_id": 14}2 +Sroot.keras_api.metrics.1"_tf_keras_metric*{"class_name": "MeanMetricWrapper", "name": "accuracy", "dtype": "float32", "config": {"name": "accuracy", "dtype": "float32", "fn": "binary_accuracy"}, "shared_object_id": 11}2 \ No newline at end of file diff --git a/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/saved_model.pb b/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/saved_model.pb new file mode 100644 index 0000000000000000000000000000000000000000..6fb7c3f0e8e4a35afa38cada60f78a6097b84501 GIT binary patch literal 755111 zcmeFaeQaDwb}z=;&5ve@qNpoUqDqve#aYcq&v3|Yez$kmGt%zN&hE@eD`{rdv%7QE zVmGB8kxg!QQ#0BHvTMI*@8bEz2%N-#9XrO3jkV+Vhn6jQ4;b zuwgvdj^WrbV8s6f&Z&=k>(sqf_oI19+W}{>6nSsmsye?qb?VfqQ|FW%_q#v-68R6* z{qHFG3>kT)*?gzwjuPjidFL+s_X+$<;oq;rzw?&~JRI&n*>BZdGF6+LJm(aK&z`?9 zI#L`jx!-b}|HN^~`()~sUa$S2^G>7NZrtB#-Q0I4NU7WE?HzQtTC>ycBpG?!*g0s; zk2G0b^QVdUMN~ac$t1jPHyizy*L&F5Z@I(w_x5&Z;klvjK>gox$k_FEqX%7eiC+`+ zYn%+o6)S%17;gQkO_^FB0lV_&Ry64Vcm|q~Lpt**( zz1Q7s^xe^3zuWFSprh{aTc3OPMtzb@h~MDnjnBPHHps}0y-u%B&jbxgQqIT@xugA7 zr?=M)(MCQ&&VHudW2oHN+kbMibGy}O>gX^M-zUSbws&@1mkc-CyS;hR?lfB;c?>K} zvQFoCKD`Y0K$6^i5JB~p65+3NJ${U@xxc1J*EtB)Lz(Kj3YHxG6g zM>GIb_V0a$^aL5(@9s4Zwm8%m$f4icHof7)KSy z8Bdj5@?#Eh-)waswBD)Rc-Y!{w7=Kx^n30Fa&9a5)7$U1w%Z@Idb4BhdEz!(y?(pX z=(qPe^3Ckn{3UXx+1hCVJMHZ>y=J@H3wq$pKSjWYwzYhd6Nv^YHU4%88FIc0OQt*^Og?$1aaPSi$Lmg z&XO~ahV={?ZhX}4@prT^J4F6lhn%_9=)yPxy{+aAAjCZdg}M)Vwc_1&heA7IBGiWO z!Y}jZ2z%_3op!JP9Al^FpL6D?34a-S3hi#Y!`la)EhsR$wb$8hKL{p9!5xBWIw76{ z(0lzo?cF7Ey7f_O>!82a6PP*p3<{qOqA=@M!6vC&Ce3~n-1oZ!y4QnqjwsQ2W-vF+S(67YryndefKmOhkaxko{%x@-tO-lSZKO#??r`agCB!p6L(P|fpZM7R3+HUakxyHta$> zXjP%-1A<|uj`QYONthGr57T5N|HdH~?(|^|+dkO&CpSldU4I!k_GcV2$!P$I*n9UO zU>z(MHOx8i$NX7_u`XC9$S`9(DMjF~`+LnN@~<*EEhLjz;(P8XE|d5)d!81^5G-Qn z_8VOgX{{Zv-EVcdP!EJ30SPe8`qtjM-Fnda=+%Rr9ay8b9x{gE3mVK&nAM_@SpxkV z+i&!to{lf6$p7Pzi94{;1M}TslR6-dO%D(t_gfFzo%xa1+2RXaS!{Kh{$EpMjOhrl z+{h=kX*7SH$O?qsG)6{hMNpT3Cke0L+imn71tJ|JbP1XXJ)8w`y0g>X@3ni;H=Wjp z9$OYdZ!R&J1zOA_!%OlOA&l_OgU`c~3Ibx3BL+f^Ek+8pv3Y$hx3Hy5JWo+G12*Te6CGN5`0Qv>u2=mH;E&uh8{u+-3g+vRSP-^B0MDXILhF$|0vo?k+md2M*<0+uB+iLQq9J6dlF`tL&&hc=?Kl4wMGh8i%Pn)gB zY{hpcgoNVrhKneWlJg^+5h|m#m#VdwmTH$Ci(eMHTh)bXZQ;RE^Zx4QdUL6{zPjPK z|NEc62+IA)*4{y$HuzqFVAUxyb8Z zWcm^Rfs~|>(nQ5Ksaj_p@9*^+pkkttNcCw_$9c7^IIkH36L5qaZ+5%m|RbrXxTWPDjAk$=O$$jorn^i?HqNwHDb% zeSoom-L>HV(fY7kC^?mReLT*&(Pl`GT#K}wio|GL6);VNW@(A|eZe!8_yq4B55c&Z zfMEPE1Ht&S^aTHiL(XY1KLSmwHHfS@=MOTemtc^~e4sy}5A=*{YK(k?JZtDfD%wr; zRjx#adxGFzX+s_$-~)Ip2b8bw7%zpHey+5fyiK)JFDZ(A&BLBcs`UFV?Z+>{+4A3LO;uCJ*8OxbMZ zsR);yCK^E+rv-pUM&`HyP3(XIG_hy~KojfA2<-BGrjH<*KTQM=l5Lq#{Or zn}(2a@=bE?sPvbp9&2ccd_X`iN0?;WG2+DoMqlKI#d^2b9?M;{CQ(fQo&-bGzJXQI)NO;VI<&{E7tXXUXvJ6m+7*?$$VcC!F4%Rwz0S@P_)YlKh+*o% z(bpIq;r=a^h0y8zWjL!CBgGrH-kt*g8B%-=ei^D2K0mbK{D?CILkg$;&)oh5nfIWw z?H263w!r%aym%V@gKo?Fu-n)Nv%R;vv9dzoB#(JBfHMs9Y52K{^oDVTQ_TEnuz4|y 
[base85-encoded binary patch data omitted]
zkkg8lBnVi3$06%48sxA7$W$L#M+F0Y{#a*}Y79T|{;1%F|1cri);5hzjIG$_k`(sSa1?Sy6UF#jEJon z0~e+TVGWn>>c+>+^ey{wAnp@36(b6ezN4`X15>Z5imP&0+tF;MSJf*`S4YFNPPko- zDU2#Q2~RR*q^tN+Y(XLv0W$PI#C9v_V*xt7=!M%ew^c^*%zJikUuT6*C0L%YtFGq<)bPjFXd?1VoNHBLqbF9I2Eg5D*UKT~9!e{Rt)@U0DcK zv~Z#L2~Q~0eNad!x&rPA0z#OISA;i{q7l3!jaoV64HE*DfLI}j0!%Z1`3K~37#Se| z!kJ8^$QOV}2>19(fowFfzE5D_Rq?=k{C%&-)opC3#}k0``WYcY#U{c2U=;QOQa~&@ zPz~#273G*d9@T9YAQY|yM^^bnjD);!0#r%i9jtHuv%GH+ivZTQ3C-~;8hHQ0!3$gc zRhvVoe-ncB`WPWb#VA1^)BT>r`>?{Nq8-!6MzTJZwLPhi6R3L{nyl~jCU3-sgLQIm zCnze1@jm~G*XOFyDAeb@Vd04YLaK_OeqF~DzYSRy!kV8-POL1%K@SR_tolhpF$mqy zmkz{SrORtoKRM(U;=K#sD=% zX)Uvubo6`DsdfWCi$KUx+$bE9lYa5>BnB}|tElQnG`3F&%qL+TQMGpWAu(Kq4=iS~ zq9{pB0!=ZFO6!LWqCa=TM5gb9Y_%hBnfXhz}^_CL#!;zr%^f~kC_4a`Mk0cB{xO> ztS>97Q7D-+6gq&I50vS5DQ*}I3)B7Q+XaI4ytWWCk_e0`{IBQr=k|a zsR(h9)lV)CDjkCr2VDKM6R4jtL|R~|=CcE0=)=UJ8!tbVat#U|u> zgxd;k)GR~le#8bC(fx@$HB~Ohb-!vK)OCL%;C}ZbgsF%{a44et5sgu$U~t`!D~yE% zR?Z0GAjBmf{D}Dv>-~6VTa~qVpZ`^V$g6h1P@l(x^m`g1LPbYo@(uR#iahQR9ZUE~Q2%tz`@M^p?;?5^GZJ#|XqBCDy{p>sbiEq^((he_2o)U-BO-bi zQS4Nl2h+Q_LRUzj&}oRWw!E>6nA))3JR>!(J^(OYo;<25^wHeKTnt6#Z~$?O@){$9rOU;cV# z@oO1B@B2Kvn|r^xwd&k&G*?!)IIbwJzMNwVU*AUuQ0tuv|W2 zIXkLcZetUIO*A1s?wsRaiHpRvAk_$5i;Du2mnsQLBPqbEL=uBnDRhrmvHL2e?yJmq zU!~lAl}h(j7NV-ycvnVBZ4rh|t>e@-n{(?8aK*1vDR=LIML|Mqxyn?Rwemf|%Fo5Z zI^|q>{+TN;-MIe3wcD2$uRh~kUwrYIE4Q8JZeG6eyamhsaXw+Wdq;=f1@Eji>#&Q& zo%j<^T2Fa%*8F18pRtA(6aQy@Wgpzja3@o^m*M`kEXAF_y3Fq#RCCrTWa9OG`mJdV zm7;|350k@%`3DGcWHA4s6a-9Em*SU+hf=UIaZZ$%AW2UpP|{Ngmh@BtCOt)=$!Up1 zSsR6BqtI*=nvFs$ptKcGXay8n0fkmTp%o=)fg6~rI?`VX-$%**rL70l#A1;t`_CrP!|J7&P1YO*%!Ovet|HM6=idb3=*@+zq(sD35n%#PFP^|fAT7!?&MXl-sDx( z&g>%wj+|z$Sdh}p6~#K8y{yK!$&pJ@Y30K2Xwt0k9F46$c20Y#QghjoCQd`9NfUc$ z7rFBuGN+l^TYmN>?p&wNb|&+=Qy<&&p5+diZ%uBHM_zffhv#CGpRKFTOac-{5Ql`k z5|C8fJxJ@l{p&)O3DC)4}P}Ijh6l6ql zD99ThiaI1Y6!ozW1sRPT3i8H>q7Fw6MSbi;K}I5ng1qses6&xMQ6KwIkWt8?Aa8sq z>M-O`)W<#)WCU_3$QvJuIs`cs^>Ou7=9oD}G#38Br0e`@mVA95IedQ=tXIyRjim=p zxty<)Y#+MI6dZk|V6l<{HH7TisY& za;qLEM?;Sx6xO7PzJMH^_zOtZX`UdHtf@e>X$bC_HeV;V*sCoF__xunwcE7^_iXo6 zl08{#cD7a;5WKUo4&hh)GTRU+3#=gg5iPXb-dJY^py5dYbG^2;zP;wK0}eO#=y`IE z!Q`*i0o_qtNuLYRAY#)eL6JO=sS9W8AY5ImeKJz)v1p z$RuW4@#DW2Ebfz0#siov%zc-amR_^1V=}L*o9&II?fMqb&L(XiUGb(Dz?)=@fmwtZ z3gDU}{ENk(147l$0#tND>jmQLd`4$!K?jx`)UbZTAO{#NZt^!TK`2|rRA|0Tdu--p z`_BbwZ-->*8o63M5|pJ>X1eD`Y97&Hh$+Cif2+B&zR`qU;a_++FlrE7lg59B+apXJ znBxt*i7FT{6h>a~BQcCMPpDxLHPbNQEjFSAOqOBB?KDSHA)G=F&>#r{>;2oc)~Y#7 zKT%5T9@+*EKGrr>2l9*-Houp$wxavU_jayvNXuA1p`ipZnWc|KDvRss8lP9>E$zt0t6V(N0W;wNx zjDl@#K`u@(_Ih$BWdlQs+o9s(1RN<|2o)EHNJ#NwsJP?`AI7~DDlVKxW*%2}@#gU$ zq5iZo^NHA9t{K0jn0eB(s04OACcX_4r2|`huXNy{lbiW*V+S_==1y#CVOuxJ0-d*E zD+DQ7=#Q<&XSU44=Gfh5XeN}KEK^waj(Ujr3c2nd6eiQ%Y08YKBuUkC8DgG}Oi)xk zpBXjJMPv(Nu_2&XCbAd{1_GwSkqA@asm%U}x3iE<>G8zrhN`oWIjM`{r(lKG8!=Bo zsw4f|8<4l?s?GpPY)uLg%jEmuuwkr}nieNaW@?&~4WZ=oPm`}1erUAo_!Th6NVz0STflOlWv+n;Jdgr^ze z=C1-f7fpTSSqcs75Fy?#un`7-gBdmO9;~N<-(V4h#72;i?vS70Ghq_wwCo+8&Ytre zEvFWbLAMW^sZ4t2paG^Z`Y#3lTkB-h2O_6|=De|v`)^$o{Mpje4vefW|6K~iSjiHY zwr-F^A|dGjG9L~>{vje-45U2-5@?rN$HjXJu`3e78BMKV`o{b~i0A~eH|7yC>Xw|X z(S_PCA+M0buK8tvh8?i=?hBZ7rW6Q1&K6IuCgkX&A>&P!T&!lO%){f?gEnG4_uv9)>x7AME_zHQc_`n)d-u!p_1xz^mWxGYFee3Y%jj1d2U5QR^K z1m7k(_m=i_@=sMHGS(Zx+zSzhD1nfy>Filg7>+GY@KwX85_*W@{xjfq`nbO;!0_5^ zwEdB#zXOoyq{dsCEC|pefshxbL+*I<*=U`R%ITJ<=uil0VATju6K;B6tiw{f%w%|l z=I!wTi~iHo{C1F4_Rh1)*0U-vvx=dD5EI9$H_51 z?>xWCQTz(bT@0E2_%)C6t2`)vRd$|Vl_-7%rZ0v}fBY(={HpZ9FKMt>MB{v75vEtG zhh;Y~lPu^P&-d0iB&GNDg`B>1yZ_en`qm5mw=S*UD%*?ww=OMsYU`!`TbI@cwe|V_ zTbE{2we@mut;6vh+bqH60(=?0`^LJ%8+j{>X9@eUG|6}^O54tHE}8ylw@hDFk!R94 
z^UH~I3{h7cR)}ZfL_$0h5n#kK>0+x7L&Osi<)o{waGix+(3X-LSmL2{{Z%}aND&Vu zTEs(%81Yb|=9tG|lxw5^~YTdkc4SI-wv&Q^Qm^*nM z6_z|7k~JwFN{3d~5f3rHq(J42CF z$ikr3SlDchwApbb7k%{>c>-q`1Gn}*H%T6ETyvLEY zHN8D^PCoQAMTifv1_z4RQW4Psfg%N|h*+NkMT$}pv0BfZ7RQlhIOk;-=kx38`TVEI zo3Q#R<76L%s z?%1=n+=30?@=JE9vQ%1HDrHZT30j?o(P*^WjU{KPaSv7!6OJ^VyYXt)BnMYE9C`?R z-_3KIx;T^_Cj_EMHDI=WXQOSxlj>#A13nh%EI& zF`!h4FWk!-m-_9CMfZe=D2S|%L?ts@=$4rr-q<2iFv*(Egx33^6>PBnr0etIhY`5zThq+&v zBV?bIHd5*AEI9~D`*L#yhVRDKhI!gNNk&Ne%8ggY;jE3&0X#lrr(PVY7~g3OWuGI) zvblKkY4Y)?E~R0s+KsL4w(EklFB99L0GUDF7g_|;uB}BxWUXh00k==Ti+91oe5j|CJ3^n+$U#q1k2+Id9#J!wrV ztue04WXSPfaym;0NnA^m=)GTf{j{snLC&qF&*1fd41CFkcel@A1>-e|S zeP!SjBN%WFNgh=wB@!@h%mQ_H&(B>M7|W zagYhUTp@mm>}QvE`Q_nJ7x&)ovRR*G9OV~={f=j@g@nQBk*h6oTGAU{vtc#y!e6+` zRQy_G~+mSng+x)I&;Qmop&70mvV{~!E-Dtp(-Iw-}eeT^v z^N?#0f^-U<*J&Uq>m<9fDBpJS`+|o#nL{pDvD9C%a*k2fiW*Z&Cs)0Dvea(qUYX&O ztEJheSY@e&s4WGD>0iY4ZhcrGrG&5U6U4Y}9wuY13c1CfWhuQm>#z~ z&DM(3f$LoLRcF1?UV)9zMFVa+OgdCjH`>jWW(&-FjC3$lP94a?Jt`#&+Td>(W~U=f zoYY!MafI!FYvgq{Yi%&@HXx+cIz4fKm%}vL7sMt#5{&+3R~G0?u!}CqFX?ZmY*#Au z`eZQ0+YS(Sf*kb_mZFLQbEmEEFi1`khSzKm-TGZ=f60FD=p6gI2APS+!Q}I#8w7$P zX|ZTR-nb}O3-OI;=FNypaADUGH1YyHnUd#P*KJ@`@xOpD2p~Z{Y$y|4=}@+P9MZsc zmP6s4r3YegJIn7*sXI$s*;zgj-^JA}HMJEtS| zojqL9>HhX8+=tWM)9H%SNiOjuJPq7whmCiig5w+Pyhz#DOMUEnR3zR{eOmHKkX<&^ zOG#o|(G_>zkeZjBHG&gjQM8^Iak*vLRU_DPsfU*L+%%%YDBM#I>KddTcLKAs{u;SE zrR93TBXpKtu2)6g<2tBtJKR;{u*%ZyXUXbYOH+#{53uW{u*zP zo=yQ1IBtAnl>B_!Q;S{Aw^+;)Xn#-VhhpCYRiuox?tT(i{g| z7M8Z*s2U=nO40Uc$lGq~K=6sK>(DsA-NKf$QQY%Yp_lPIHyvLf zy>lE-IqxQn|DLZ3%@V6ia*Rcf8=p0A4Hs{)zo^u8+Pe0k4TWysp|RpGhaI`rD98{bs9{IKV% z!h@f5|#7OV0YnQezDQWHnpP#wN9Fu%66*94>u1auN4}9TYilKB9X?SX~R8{jxtv zVb8ilw|_c`^oGN2B*@?Pl!`xQCMC5Lxdi#07?Xe(>87FHS(zzR7lD zWqS?Ee%AHP<6*v_iT&m%sH(M&1JMD=LCDURaahPVhh!n8-vu}Pn=~iTXmHdYHyAIB zoCK%J<6cj=9qV+OE3F#D7i$0%VvcTX!ez@QBs3z2G6ziJzwOMNg1~%R%ML@M(}19@ zt*vHl%^c1c5Z~-yLUftT!62byIzkWW2|XMnbUZ-lk;H^fW=!*l(*}-gtUD`>7VvIs zqfIiIqvmkEv9<=(O#pRt;5ePlMt$ZaIVz}Yw_0pToLz@Jm5 zQJgtLj)73t@47+>_d2(n`|a8$lRAmt`DNJ!?Kx!)zO1EXxl z99eIG3r)R4&SoAlPqHzKkD()(N6j%-z^gNWkr^C{k;AK-ae4v9^o%;D!{gZu|RimW7Qw??ALe-eM`$!4L>e=KMMALNRh{7-9wCZ zMNcf13wGk7^M#~EE2V|RMaxC=%Le%=<4fr;!d<>+x7XG#w^we#fQFo+!?pHG$2|9b z`V7gu16RV`D4OJ*j4M~LB9Z;?47(w&$H)jw!)|G~>1@I!efW9zJ`EBH`fY|UTpjbk z!*qtEHirm(afFb&_h~>#w_f`F^bn-jJj5RPHE2PT9H_Ng8!**SZ)-FC#q>}Xo_|*E z8u%f)27ZXHfghr4$S)2ZT)qW7h*7qx8>i{2HuGAk9qX!0*I~tC7BM8PFjZ_T7@Q^f)1~Gid=L}fG zXH6n$P=_;@%mdO5e`03l%tOvvqt+ah648*Ya4aYoFu&reyAS6{HR< z$@5W5a#>oEA-|8eyq?W0#4X9y@RHoO4tX%stI~KOb7TOk?mwZc?nk`Im(I~2^?uMf z`eWV?I!Axp`%$p}Z7M-*=TbRAq4`RJLgj@7g&>qPWH$L#m;ruSnE|SN2C&~x&q=E* zUr~v?${v1ajP4p?tH9!im{H5^Nokrm{4jlxF13SPaz($u)F}D|eu#d7AEIB#FVH3T zDX*VscRua?pxt@d`$4;N(fd)bzn<DjnFY5Wp&iEU8nt`#<@`Pj9v+w$=k{2jU%eJN_; zyDBYwFUt$xwYY`vdU)aM+rDrx)2l(@LdF`v!uPLXjmX^arof#2D@l(UG+O2)Ut`AJ zPlM2TJGjW@W6)YAjAua*`QMblR|J9o3M`qjG95ZgiQZ9FO7=XphGH7_w+8KkR&u6# z3T&VLxzChgA9Tr`V<+fjs(Q>j`hj=JL!*FUmWREtt=qV~3Vvxz|QBaqL?6II}HC~6IN0ERu-P8fL z^e7V0q(_l}A3cf$#F8CF0?D)ypp`ZPMAAlpI@-vmlQi;W!~T+C{il@VV6D zkEZ&uqSI4=5AP7^pFMEM@!@69`>Dmh+?VvwGn^0W(CVK#^nmI^I`8!870_d~c&*7j zM|msi9L15&QQnr%QEGALD0jl=D19HO9nAE4f_5SE`~c2TzG~QCG0dMe$WMaK{^N?y z{siBeJo0{eS{#bdZ`@ZyI(m=Xn;ex++D9Iyhv_MM-%f4hgN%FJac#V$e~Xqt_9_3= zSp3IHkLScR`K^0cMZRGy{>K3v$9*7Zy#{Urb=hrTEVCrL0E}iJ5!DuWoUW14%(A(! 
zzOmifnwcWw!atn(XmRhex1hb1V0){<_L`!-cX)f?H+L8O<{;%f^YDvkf42GrKi5k` zp5u27`_$#7+WH(j0Sq4lYKq@;ONNV=t0mq#J2kW~;H`&_4mA}+a6%OytDXml?99=& zaA@wqBT_Kbx?Av(>N#l9KV7wt@ z>MY5yn_x~ZT-ug|^(67lkY-H2dteE3#$ShHxr=jSOsxb^vDP7C)vHo1CH`eAyAl)ebcQBG_` LlFgu9?nwS01~!j+5ALv#eU zOut8S@{dzzSw^G(yUe}*mEz}P3#g9SW~%e{JDEBujobWgr}E{7%(X_8_Xqbv!TFQ8 z@x?vVU3DL;r0%s?%{hgdq7!&fr6!d!oy?D~y9pyMH=^N*Ca^y4#hZ7Gpz5<1;rNm%e8=xp z7&|r;g5N9f>*zuaHfy20_FF-^kvq(Z_u*}-GCW?-ivJ0{fng`x1rxrH;NL3C!DF#5 zic7R$>xQo+xc?J6#~;H}mR0b3+fGm!WUF9ySxcKo_r67CYtzy}gm+=w4QKgARr z`PzlMtHoo|fHn<2(1}0e{dl#>SpIOF0$u)aIJ*8@gKL8qL1M;!;(MY9<_K2c#X}EZ zVM!I#dCjLm)w)#Asu%MAQ>6FElW|PKy6kno0+YJ-O$@F_=|n&PS-)@~{6U@$SLt^w;SE+@HH2lJD1on#eFr z5ROE@+8fOAv;+0eyC+PN^1!QWK9a#aFVqJN?-ed-tt>-x{&%k#eQ_32YI z+Abh^;TOTna2~sI-G>`>nbE3TUG(b^=jHFM>7`NK&Tkj$bC0n}WPRT$h`%*}Cd*vt zP7^D>U$p_fpUdz&f9&b^5C+?3DDr;HfDI>&aO_4mzG>YVxcF}}ZxAa0ozH%B@p5Gv zKh**bg(`8(Q{;is7a=zG7`QFIPhyuYq2uQ4#k=P_@rhSFko1di;D#lCorTcNh2-Y$ zFNjOExX3qWtX%}qcR`){t-TB`2S-uW(P}(S`wV{6m7@FhyhM{rzfpvIhVi3g(D;cK zZogy7fA(0>67h{d&R63I`#kX7;sLEnY3#V(B(y?bT9>z!TC`A@5mJgildE8PavnA} zjpnr_xZ*m1xJLLFmnO-s}CXl|(%0#gV zqo}z4Sn|TpfmVLZUcF$mg;yRvjn}fXgsKmop!*0v zp53Ma5B@o${b~#P@#q~vxRQsFkWK+8;8dTJG1NIcyUB{J91s6Fm8zD;!!Xy{OB=Zz#w>NZXi+7w#lN&xAH- zt6%5I*+a4<#kv$K0;-67mJY3UHRbOt?dWmYF5Gyl73at|v4vVHV6f@JkbX3h_KD9> z5aEM8JEn2F;$0AVeJtO)!j8M>`SH=uvrtam8a;M6qoMQ&dfd4e3+AZPYZ8{+b-4@; z^^xX^C++B+?bhJiG!=5{j^cBtL9qIxNBe3N>GczO)HZtr?YR_0?=G)^H-@QD_o_s& z+h#H`UU3`GFG>YP#Vo+e1Ei|ioKwwgXU(7v(&yQQ6CMZhyptX@<&7*)oqR;ldCZ6h zeHq84B~-c9$lLhG<`SeWY!()ppCaE~-U~i$Q{+dd#o>Cl36xBkNW)$`Vn*`_Hgbg* zfBb2guvT1-^8ig&+j0U^FPflEUjiGqtORrFLvd;SW1KwNfq>3xemC<2lscQk$y=wv zOm9CrWM*UO>li3dIRU?!fLq&!35!xDqj7I5NT%%tx%3)Pl+%Yzy1z+h?N1g`dlU0N zD8Qm;?hsIt4C1k^VDd_dY_`n6P!TKKCEbfk6P`0Q_p>-9xDcmJwWEGrMfjjX0JjS% z*7VmAuc&N6>Job{6{1C_NDg4$qGF5)N2rW;!Og+R_*GensohORwA|2I77jPu$X)hc8TT zLyOT*p|vRGZal%;*f1{8k4syYijpY>7cLZ!gH0dR(wfCKw}4>p=|LFrt2` zpy+ES9Gx|sbUBR1PZ3W1R^f3$XFH0q^!>y=m%MHQfy(b~K`3(C1j>Pr7 zcHC|4RZ``mLlVBa;;zy{9Pw}}pL0$W)+_1Jtg#ZHk(@}PUwp&c^Ueyt8WuV4DJ{Wm zN=x9|m^H%H(Vpmkz8^l!p25G4OMz`!XUIJ77BDkw0p;@+cz+j#6T=E&rQ2LMHeM1w zukK<-A4~9*fjC;lW*`PLcyiE-{_!{>)KnXQib_%FB`evpI2mYMH4J7QOv4aK1DYl! z&$k8r6GRn!MTz61u%|bJ2(P=buXzixY~@8Pxw;;tx0nEYScKJ^C*W~sh z_!yd+u9F@|V~7}di_H(^p&_spPo_kIkMu{jQ$CswEUADJIZ-xi8(`V)By@_E$EG|^ zeiX%%u({`1q-i!RJ8A-UR}$gdl4fSNuN&{r$;YE&4LIwhKKuyKZ9DL1siaLr6ws2&BA##^DgmAmDqZ?W?jQzLlNBM(p8^ti>;$+YW*HYP4D!(A^Wxr5tE z6#qIO-r8BYR z&^$Cxfwvv=V2VgFIlb>0Cf{$vV(mEGnkNNU{~W=xfmTv}t%`KV2QmHG51GG&3QbyS zPd&fJK%;d9{`-1-=vySRNhe2R`N+FyKhupL(-*+^hW5(HLPx&pNeKp*Gz&jYJ40q& ziRA_LZP2?K*V+nSy?u zFG+111*@PxV7_Jqc8ePEk4jTv=Gw)OR%M9Pp!sL`Qo%YFb29CM#7GtO4Ks27RHLcX4#fAVE>S3J(A*u z)AbDK(P0mXXy#&?-Mog;qffx2sug4`r$d4#fv*c?ux~*pEU{RGmNW08lHp9YGg)Di-9=u0k2Z^%$WR4Qd~>`R4|TI~Nw9%Zc%rA@P=ci->Wq`Yc6j$Cu-=!Q;Z^ zJAT04o9D>l3#UQj(p&gkd6L{~Jd2J;UxB~J8)0*<8-%;|fXWId{O{ikSS6pyb`1HM zL-q|g_|Sp6S!{!n3K?p$egW~gX91OWx&#$zXK>U7D^xu73jKz+!|t>-EYL3<#_0fm zea{k2bio zo!$mpN7iFD&4GBEQ;>cCfS`P)g7B3<09ttm@fI_ppVqD9KUMyKO@TJ;&lzBj{#oR& z^ikm^sUj8;VgxZKL{Y*Z1an%h$J_H_%XAN=5a`*I)1CwIW! 
zlhMp{wlSEek79!s;h;&>X}W&2VEc@TT%a)uxbH`S@zfV2`JOx5Gf*wO8Y~6(Rq9bK zxQ4AHu9e^HCi2vjYos?b4Hd5G@a{1M2d3yu%J|x$J@r>T@PuDIC~r7a(>~x<#t%Qyn^jYKPh2 z{p7>LM7UqQ62&Ah6Qjvmyh~$mWy;(SuyMjEtm?f6%dW4aJGxWZmZ|RaMq&XWPaM&; zUIu)2SAnHZ66D3p^7p&-X{VbDI$xIHTe5C~Pw-_duX-x1DL28r&o{%;8Yh}FEQ1Ur z8$-Qe9A@dI;G5`hGWLf&owF?oBb;L^V-mJtp7ADh=`_GbiGi#6?{nBDmqB!HiHB!f zZNcuuG5Fy$6JwhjS**7bJbTwJup;SfQ%4&_Ec}Ga1FeOPdyYVw=0udVQb5)82XOS^ zF_)AjK5*<9R`7LQQ| z43Cdjq?)r2UR4#>0f!YqIKHAuxb}!X9GgF%57viJn+twuzbzS8xZD$jWUav34fE*e+cyW`F=j)@N+X>X!_i@geV;DEAnwW-k-kK6-=7nib@w;u*XVSI91(9Y85t zSC}{2gfBX60>wvvlUScB2-N%oB3re1@1|_nwf7&f*mW0XSSP{>?-1H4Iu{FE40xd0 zQ(~4L4*kX($+g-)FfAbzXBA`t8}^Z8IVM9Ssb-dCo?vq-5A%Jl;FdAb=(j5r+scRY zgKjo(+(HpPP2D9twc<1JvM+!O8)INx%`Nb?R79QI|QY+jkK4t(=V5^ znT2jc z&}y6miNzVz7l|lI<4_;+7ncX+= zr;AQlqWbSLJiA$pZTojfcqQaJ#%Jc@ZMB1Vu(+5Nzp4iFO%w3xW=-lkV3!X!Z2TWA6!Q)AeE_C zULXp3BOx$Yf{xK14nj#Iy6Z;)xDKv_!1Q`t_O6fp+k6ks?!1l3)zeVZt`XiO>4Eg7 zi|}{4BDXs9kmN_lLx-)UaN_Q09NB2em+P1d#HQJSZ(soF3wnlOQ*|M^K!HCi&SPyB zPb=H>|H1L?zuAM?*{tW#GTbm(0$i_Vu>ET8IO+0Q@<_9oO?_m_zua)c@aHY~IJ5~> zFc)wBTY=i0E}&^4!)s(Rpkwa`P@i!EY=Tpz;Esitah~mblnd8~Rl6AaZrBZMj})_8lfhC`b?8^0 zWPHAIwm@PiYclNcp;?I=!BO1}-CuTM=cFyn>!N_XTXz)~n?z9cAD`j=VoI|>eVZNp*^!OSCf+1Mi(vf> zQ+Ak_vG&vl!a0_oNX|4fCTnbkt2dhBsio1(sQnFGdNz{{8(q(i`fG8SkzG)$SC6xY zKVd7bq~ShUDF}rTaAVypm|V1zrUdCh>ZZN8^6@J+%BGr3PpBugHSGeElw9y#pM+aJ z^?_B$WZ=C&Q6ZpN*rL}AMaOHg+AvedLNqb(!7v;-t`P4R=<^L%5*YU6lG8TFG33b! z+%4x%(2`?O$RRw`+J?@@WKntfZ}Q|+Xr;`>I(MQtb;fMCFstQVZ}=X0%-+%Tr!IwUbu$C ztDeA>BMLaC;j_~o;S<5;8`&@=@dDfb%#b;OBwDA1z{pjX@!&EwOzmCB1O4WZ0rN27 z-+?K(`BD`7&p&}g9$myfYmI`9*J{ySYYBwYS&$dE9#7RM!nmOztaqm&-5427EMjM4 zfZcK&@7e`kd(7xA@e6DmHGyrd-gsVVbX+7HHLV(<_WHQ z|4}&qPlG@+bsuz%+=r)q$I&*UF8F%a4Ccs7@{k>`D?{tIU=}$fd|6kF4o0T*S;STL z>4OgX|2c%e;`d|K*7I;lFW&BL{)5=?3rUw@mv*zwm|B2e?p5slA&XaqoPKPSUB6_-hRJT)PN# z_szw3D>SIaxMWiNK8^|omXO7#5>ef1H>1YdSW~%ncrZl z-_>~DEFYefUB%1;gYkgRN@14g3*tMc0C##D;t{h%kbl}h&W@YLd_J53muG<;+{$YoP+)yyxPc zXdPNvGl|uH+Qd?Ri}2;U&kGZh(goord&gqOzLgkKAelk#q3a%9Xouxm)c@uo5O{Bk~+i@8Et z89`agCG^qLHgafQG=9+6Mf;}*n1_xTjX9G{>ZY1;CcB;_#dSllkq-CK*vr}{44u`a zesl=$g;)KLaA3kn@YT*nz4LeAoU$%euK0)IDU*19W&}8|aYV^4I|R|aHz2vjo!pdq zfI08q2|V_mV((__vjOd9L0ID|@Z7W$_72VSt0@V%B3uL09E$NrKp*BWav&0R8gQ+7L;AcY0n^1Lpyo+G z{0>TmLsM@OR@f<&39Q2Lt3-I!!P5}*Vya-@_Cj(Y@fduq{SV)T=*T=s>O(>I z?qnDU{DlVk>tX5(6PCYg6gNI|4K{^56ZBp6#g5?N)V3m=&AHQpYi#rIblwd9OJog; z{AW%VM@d2B?ikp3T@F_Vd!mbOI?4?a&?=NC15dM@qwoG?)ds_O-j`iu^tN>-JsxJ~CKNf453`5-?wqVPMwAdE(~apk&nkG`Rt}R~#zDM{P_U}h ziQj061$ucUR9<*Nyss_6u7tCoFJ;f325Es|SUc0To-w3fqP(~FDooFAVG8C_e1yL| z?$j$A^0mo0GjKoZcGtlVx4jUtsEMUKxCrOQxWKF{1GuM3jNWlxheUTjj!|~uZqbuL zWvv1j2Za#Zs8)!$D?#75L}Nwa74#Xmlzp7XaJk8QwoE39w0vrU!dd0GW!haZaGlD} z$xvRC+{ThbXX2NsQZz(;KEB+wA2Py+@v^U7=ysuq*-IR74)>ged6qjs*X0r`UNZu8 z|GHysbs#o{6=BK0>zIEc0P|P+gP4Z6@Wz4ReE8#EIHGkLPT3?uO`j(TpGg6AXzf9> z!x=a&UY`uQp2d~tq|kMPISnarMAP4+xtCu*d@C`>a}keNTjybZQ+*Mnj5Gxe|0ptf zy##$K7sVp(x1vRBB;0wB4^_J*>1d1j=(?@~R5jDc59djE$(dJ9cI+VC^K(crX~)f_ z$8ng|3gWilJyb7n0Ufx=EQa>g%zDK7+5kKxwF!lrexTR=By6i=OtM3ckNRx@om0-? 
zealZ^I4+ctmlCvJ!vvnUHVKm#q_ES*UeM!}2aUqv>=lk<6}^LG1#5vmktHx z*&{H8Y=aNqW%;=FGce3dtvDL0nCpZuLHeQ0H8P{;V)>!s3 z&ICQ=Re9+Q3E{Mv`{2P8MR>1Ti0=&wVbzZD^zs`EJnC?QOwxXY-oCcZPu?w~>&4=+ zO0rKlGSdMpZ1$n;&7+lXTa>8QZCem2u4S_lRj5VpVeq^;hJ9F>3)va-xY!R7{`dKH z>{j22kK+zQ-!5mlyaB9}Fe zLxsg5!Pp-i@Z?YvahfRuJO1r~TDlkQ1wHUo#sCuZhWd^N+3;5{PhfvWnsn9#;jeuq zIBmWro#&qG>?GY^x_SvE4fcLuH_`Q@iTi=`y3^L;@KFZBDlI^AKqV`36tl1 zgt1s5G*w}sCV+>7Tp)Zn-RWiG$g9%Rx>q2p6Dbh_sZbzxapo=k<4j$9^FJalPsQXL*# z`5brdG66Ts^vd$3uh`suSD9z84FugQz~R#_vmbtEVUWpD^{Qc5LcfFT?bptizn%fI z`UE?);RbBG>_iI-Y-sq1a*U~$q3x{^xN`Sj+>=)#G`l*MN(?6BLpeM6PdR}!F$)|s z{61zD1=E`52za(O0wQG*zXX3H6LOSkK!mkWB0!OM=c#eE(WdaNbSKVx^qHk4Ri`mj}(lqf{dsou*&u{lQkIwAWA7LZgN6FIJMBGES)MDoG}8(ZnhLcH)>* z*D+{q3;e72icCKv>gt;Cju5S6TJgb%6dHj!sNaT>{ z1#>|tW{iz(TXDs0ReB`17_*-JzdKOIyS6t_#byL7$@v8fURc0jY@N_MVHkaKUzv~8 zHK21eQ(@wDvYks?+$9y44{4XbKEBpMicSfJ{^ffwEeA zt~O^FuD_#B!-C4;XW=|J+3^!zy4?e9mqL&hjw1FIKgq}pH}2hUNG{19LiM6;SZdQP z7;g2M-TQHl>1vB|6{}fTUDgE;H0+#pV+C_0K`XYWg`>_>aS!*zf6+*7_ zJ~HoHy-@1sWTZ3H_=O>sBHm>Q8oi2vItLE%ZfRtAm^ClOR9N)g9;@zKLgzU-{%ZCn z5?6i(ekfkYXZkI8=MljZovtMF+##q*7{Hm12LxtXQ|T&P&W=tQ!R|f}!D~LD5ZK^@ z(Ho@bqwY7t_Fcu;ievDZ{4P|@a_8GOcncRi*o!YSJ6V}rC=CmW1|ed|*MQ*ZnS4taB-v@Z_gZU;KIKAp|~c^lH+%0?`_s0}VTdUX7_U6{RB6qouX zlWe;Ul|HLQsJq`vFbTVX^FK(_y2eJ5SW-z=rqz%GlTzTuQ)L^SLe@rFGn5;Q0 zf=x>r;8SuG`ES)G+#FUY@Uv`$NJB+VOm|lknY?fx9krJ z!$>+>v)wTHCgS(O2~2#`S!}o)#fEjyfD`T$KyIxp&rFe{84h!()TNC-fNlXX)}(tW=hRnK51@FkKS7w~3+p%*{|aGXj6_+>IAI z?fH`V8vLoBg`m`Y1d;O{Al=6v3qsLIQ6s5`teUROrH1<4^?gTJ zg{v=wu6m5m+U+1a_O>u{+j5*XY8YHyJ%*Ajb)x;lmS*d#;kN0vY}&$noN;&+Xt$ro z{n}Be7IY43k4nSt5#h)ltN|naOK?9nl!(8Uf-U2V(bYH+f~J`8oeLgg_?t&8LZg=j zxBVc8td2t3?yETJrAs1D>|AXO&wo*6C`DDT7kU3dn{HMA zjJiYF#5#$;cpx$k1AkwKd$RRV{l1@s8}Gtx6U(9e(_9oeIGRV<)WaON*=&3KEGShN zMYoLF#5}xDV%RV#{#!YiTg+>N2UlM}SzDgaqP?G-I6ImdEWOGkM7NL$yK?cCdly8{ zdBSX;y#Pf#3kMqJKxOq^?8;lj`fdlI_O8Ep?ByomB}D}qyYw|KGmFNR&#G{hs0Y2J z;fyug?jbuo#EPuliSG{TQybf2qPul6XuO;~l<8a>$^yJ-W5oiRpsJ0M3rw(Xc?`>( z-U{}EvzJP7JA}>>=3;7+Y+<2-=Xq>10-Pf63;P?wA*9qw zAiwS>3bQk~=$9zGspd|njc$fhU)$iQxf5S?cmrfU3?+YCdfD|?56Hpi#_*%d7<#X% zLG+Fz%%jH$)1y?m&yN@=`+5c);>`GyW7$IAM~fiBPl7)a{Q+pboD82iiV74`AYoM` zxcXdz2IW*xN;M{Y$2Ppc%SrHd5A=vEXLo~63eUc)g$bYH*wrm@STNK}zg80i=f?_k z^7bB(tec0%V^+ebxe_$__9ZA?y#>occH@6<>x7$w@8FhzL#Tzj*yx{|p|mp(WM}>) z7sx+!3{wGVK^*=|v!T%y#e!=NZ?NptB24|So`t=-0ypF)&;w<6Ah{s>Hcf*u<9EK zQ*+!+>_e^4Y&IH%4-+){09RarwzrbX71=Kx?C$X2-2wlF|LH}h*Uc0jZ zI_HOwv-x4LZRQPdU6qOkN+R%aeiQDzB?0i@B)ZJd5JKEp|oG zeRk<2c=~L3JNSaB?OF&nhV8gu`g52+jJlig_^>K{}} zKx2gD5NGJfM=z4Y@TE<{eS5U=gUUTYe_Hc79mG>%8BCJD1+BNOXpTl5 z>AhVE#Qy`i`o{$tG>YNY0!dmHAVuYOf5XylQ&N|?TWz9|!yYpYr zw|@d(uj+_itJ*NO(+Gw~t!EYwx=8;^H$MK~2qEkP%mp?VXYkX!aPx z%9r4>{a5kbAA6K^QRCO{>>wJ!t?a$#b}YMK1|iFwh)aW_~O1)>v)fOqCSo?DH@d*~pOP&sQ zu1L{M6Kq(sV>O!?;6tYW*bf~lJ=kr>*!cMunf1H|tezPG#hR}L;puPD^xAJ`b^8>! 
zo{1$+yC1`ezJq9d%#qr(1;B$jpV-KcPXxCf`tsQ#FVX4dOm60L8ryoO@R*Q7Hf)^* zw~O(|Ah#M;0R*`)1Ye3&V_2;tCR%Gy?eOzNx=Rria&O?L={r!{Ef6j?7C@xcNu1<= zjWqu&Kut+WYL~x?b{ucR25TicO7Si zjkx8|Ei_AgOww*f^B~FVf&*f=$iFdL;j!;a{F52L&-6aP03}VhUt)`ID}+qv?OWFF zSq;_acHlToXJ&KiHq-bNLf$^jB_Z>-z^o(Duyw+6u(J?YLqyoEq%KMb@Vyh*zb$>a#x7z*cG(fdO7r0 z*bD7DBgv9DMIL)bAE$p;rn$SL$*PPh^6ckADt)&X)-93c=YIC${FVXYbJ`lFDn#Jf zp?56#Cp5?`=-w>=CDXDE!lk0F6k2rsS!H~s;dy%S)=o5#%b(gEz%@MR}9%cH?Q zYi{m&8yn~r43!^`cc*;>i@afct?(PHaea=5CU=lQsK!^h7jf0DLqj}sD><-f4Ww;q zggFTgaQxv^D7^3sN1_z%kqrl#(HhL-Sw3zy2*tquUl=uZ5oC^fjn?<;u)Z@6)P2*~ z@K_;yp4toJR%r8@ce8QeODry$k%m3t*O-&j0NyWpMMi3##y|Pqba&Eo!M|27GMSA8 zuhTE!+wVj?Dl(g@f6HeIVF4KAc?kX;*oSYs2U+2;2>Nc{Q&6@%$_Dq@VVTZ(;fTTW z=;>a~7T;{$`)&b@p%esl*YR z#*W74KShMuc3%8No5WB*YzlHs$5_g>qmUvO2yaD;F)}I^{<wiQlRs#Bp zx#*pF9HYO-33YN#VpHp3vNk~p&emmv&XG#;bEgHYI@t_Z{17h}ok8;Wz z1<7OeAwB&~kEgw1!3VN25$=%*?F!Os2V*taShd}rPi_7D3*qHXFR zDRoHO3zFc8!`;f%F$*ELX9bE%EAv@bj>CctnK;`&5NZc&NLg72QT%I$wwaUBJvE=m zPgw^u=eFVB2^P4v$sFg|r$YA@Lz<^ljmjzqNXT+u(0zWIy%)Z~9q&fd*(M$MhaE)m zqW8kRm%7ktQzFrrox?9bv1FM)?&7f_=6BY(Ntjz%4rh-WvyFM%f&Ujfl(D80<5p9$ zAlwUl@*Rcp&ySEttKzXT%MnWa2)tkT9gW&Y!Hg;Hw7q&Ou9VC$Ce;g#Uv%K9ra?xm7K26q3Apjm3D-w2pxahm#SJ?ZAeCrvZ{HunK*2^V8q~rd zF*O+eGz#>6o3ZMy4(P~sNonEy;F1V>KPDtuK+M zj?wV?L@DT1$xu8xR(71Hva%43G!lmmZKVsco~Bu7OTRE5x22Zrxv!X%Rx7t z?HJ}2O;pb|V@GEi7+k#~ocr)O{L~-Cim50K9TKYO1BY48sSEhZ&5~A^r-5GL7f{+W zhseF01qWi~=<4vJxX9)g#{8WIi}zNMy39ndIwirzIgMd&W41v^Nid8H8^JWb=d;^o z?y&#AvqIuCg{JdJ2zAwi84309;Lq{Od#w}k+MFF+MAw#n?v%mlM33)IYQwQ>&0$q< zFzG1O#(8TVGwtq1l+#&*`zj|2MjSp0Q!STa(es;V<8up*iXzZG(i^Y6*o4)pL9k+v z1qSI>vrRsd+&v&0W|2~6y73k~$(;tj&pI%5^RxO)MByXHscD1*!GW=aokHd#}*i{lkJ=>~& zW>vS8N!mpV&&uCp)+!&Fzwk9)elCW$L#l}N$}4bq_aFwC?88~bDrDn{cMv}L7KTb{ z&~iyx2nx~Wu6jEK z8XNKD^Bi*LP7~3;8$um6G5q&OgnhlT9zI{%4Qclpp+T=o_&}@~sMH@;;8O#)X8mKo z@{T}|+$$)K^nf@o1Nxu!R5s7J5m|mDIM1#_F)-lMro2X#_1V~zsRE0JX13aD4O~7Z z0TNE$X90yTNbL!K-Wgbfi@R5niWR0@m}`pGOM_8%p`G)^BPzlTk<-yL!4$)e$k6+h z7PQ!JXs%Vgfu#cn`0!0)_%ukCS7-clQlA;ijIQ;tr*RbTS}h~f8h4=bb{Dw3CKnUF zx4?wzVVI$4%h&8qgg*09s9s4i;fMxKoY=*3oo7Sw$%SzG>`y^~-D&*VC`MQMufnPb zNjRmEM80=K;;mn4zqu$p#V>3@~9#Eeu~#f@hXCV&k+c&|SI@HqD;n zbVyzw`qIYGP`)4K&-s#^^P531&jA(h#V~d6tyr;KlTk5b_8YG` zcwUqST}wOI)z|>DMfZY-jx}yxe22(eiea)+KHD?819y$lg~QqxU{vR6mi zm)3-X+r-J3wj!A+nQQ``X%+Y~Gy&JkO$1xF0NVdmz;cqe!sTmHbfvQ#9quxRCQlCK z(mm$9zoHAKNH~z=6<#Rnc>wM%zAqRNrApH*GT_3ICdiquLI>U-f{b~L$c#0>{s-4U zqF5RhEOH{DL*91Ng9UJGpaN8Mu0z@5=@1reNgIF7h3ZP~=mH`~Vf5!z+*0arO{;xMP_LwkPixx1Hu#WM^SHDQjk z_Yh0p8rZ(57?!XHc(P+E4|rw98>7|_`LZu8-R2Nmn@3oqULM)I-WMKc=fpm!nO&ns>jh1Wh+-1s0ZY_YZR(D{{P;TAg>;_52gw9pIfCE`u z1-NA_J$ytT&B8~rGh0f8yUSwXwcH=_(6&l2Zi*(4EGmYk3ni>isS)dMM$@tN7jT&F zF`{pKlsFId-Rrif)1zs_>8*+w$Q^S6L#ZsDygkG*FLK3iej}O9sBbtvWB|na5>aUC zLOd=uuq6#=;Aw3>j1)PDlbUX^aw0~RmJ~yt*IitDp&CplJ|n-rOyobyj-cxIpLl6$ zCCgMBL1yHBAn7AZ@%zh1U~9CN>U+!`A{?Xzg&*2s>9b>SLV7#QS+p3YnmOaxtJmSJ zaU7P+{|1*VQikSF2AlBeH90xq0Hl@t!Ci%gU^TTL-p@OV-o}-v+#AfUuem^ylXLNd zXaQbZxEb#}F$Cq{cW~oEGK`It!LA1xn5(^vdfs(Lr(<96!@oSD?HUb9FUOK-g*|Bh z^&D=myM-mK4?)Sri0Yoy!e?pXj2@WEQrlPZo~@tI?@=snt?-0bt{LE_AB&O>l61ay zCOS1Qqt7~y!NR>i$n)XbaIbEIaMQ(jY>kVDl8v`n<-+^8u=z5Ujh{gb#2=GCMe@}A z)ji>ND;H>Kk7eI7J`jsh({cBp5z%<~mRWce!(r)EoDwTbw)PYsb zwxUvaK>8`JTor)>)?PFzAy#+=;%R8^QHfZH%d1glWPT!qJj9*d+g5&=B_tm9=(~ zKQ~XoxnfOPYTpo1skZZvg0}7eJ_J6FG94gZ=ntOyj^TnBH}o@weAVuw4L- z+WZ9Dcb$N-2h>SfMFi}tvElVSg=E%^TolbyBRhj^fZ9!?V$$Mtdh`R(aT4Y8gEH}M z^=PO{?1HCqx-hnX52{;w;z(ODI2nSRaY>**^E&tC9KO# zi?$34Ld*2!Xl<~6D7S5ZI@Q0#w5}gNRfG-Q8^7bM)?-4d=la81H&0Btl#Xgf=V4Zb zGk9g)!nIyMNt;i(P^-olOVsqZ;ssMact#0#lnvc?5>JM(m$9tRVLUzI>p-_@NTa^V 
zMs$iQBZK#SY5YAqzM?r5S6YtdaI%yv3<)Df!xnd~8~8o^?=^!J2Qr0y&NldM*m<~J^_PkG6p*9N2ZiDZ>U_J|5Rdvd z4E>9QV6bo|i@CFt&k!Gr*GG%u)ODe-{Q6PQFOvt6dS#SdDoKa!L&&(FgZILq~|9i8<5ObF;3X;fP7QNt2>&h`xOLZw*o)bGAmjc(XI#ZyUSa z!3hfrhu;bE2WlkIV+oR*t0_rJ{)qVe#Tv;k+xhKNw9+T-+A~Svw_?G-s&@X!3}?al z#KK9N+9dIHFP5~Wt+bWX4if};5`s*%5V0!!BmSghA=nYhm)KNr+TSYb3#2CTi$N#8 z!IS3!IHZ zB^TH~1=Fks#Lq?N1>4jpfq}esyYJUd@$7CO5&2GX;b-KEJu0UOYP$RR0qxU8_p;c6 z12dE))TpE6&dg51mL08<*+aRKioSYrjl)jKvKz0(^rwr0hkIjrgNCgV{{B``L|TKW zac_&nGa+$DB4@e;514dM*(4?$N@ zzG%pGiEY^+TT+y6D>3sg<(r28o%D4J1PzK)Y%gy~;72YzAg+3_c5)vz@r!<(kjyzh zAifxrA&|+q$uHNfpVZ-4(zPc+ylr8qV3ENI!SQb8c8mR z#rN3f1haD?-D-t>QJ zwx1_!bvDh@5+pV<+J8izgilCKT3+Bb`DOy(x1+x~8l6Dw#{ z^6gt@wSUU3l049gmpFJ-3dB^L;OV<#l0KTtUlXj;ej#XWd*x)lY@n5G`)ZdA|Alg) zXz*K|_>J5F@%;~zSofm>K2Kdm(%d|mzmKVta7&fiNp6M2tJ+2MdZb&>8sOP(H}i1Z?$~!zNTk3POI} z;>Sy`6^QdDu^2~<_FpO&1R?%elGrqL$)ab9?VpCO3uK7bj2#US-k)5=!^IG>4|Qt z8jH&}@M*bOru-<%NUS1bBsP6#D0VPY6Yo(q^^au0lq;`2eJ^fTKPQKX; z%HmwsgS> zGj5TOuj}%I^b;<4v%(A_m=$ok0fFV{*$IvDm}&7tf+w zk>7MhSzJZui|oel@Dv>_L{Dgc{>WpdND0b{&99a6RDTWf6xYq*Q$D*zvkoVV9)>6J zcdT|2mpgnBok>0*N;{cB$E0i)ou0CmpZwC9&ms=;>)P$bH)6^}r8g#cpH?XFwdN<2 zyT=U0URRCyOBV;B@L%Ed+e2PJEzy%)1^3W#CLam4PdjP#;$-+z-ken1s!Rh7JY;G#{CzRrTC8ODdspDi`_}Z3zVh)Z=`vEn z^OLM1%i$JL{F4k`wT>S+VqncT|Kd%r*1Ca**hZrFj19ae|K!9b9rogVGi`XuwkPS+ zYCgOJ!aC92?j@q&hI^u5jk)5;A|rmmhNHCqlOfTAylmbFaXMZR$`u7{99C+4QoBd^2gyun;LeeHU3my0i1<6KFW z_ZrZt)jee8VJ-=7+rW`y@^Ft{HTG>bByTWO$dX0_+9 z;hB%>OuIQO_53gv@Q2cSmF45te zaiTrfn&9=(yV$A3od4yz3xAfWs>s*Qo>%m7ov0)`g*U%YgTJN7kf;7+gm*AMmMk{A z3Y70;@|JFVPWGCclb?fEinibU!83fo;m(k3p|zqf!N_&#*rLsc{CQ&$yjk^x{Kl(7 zCQB;FyMe~!In6sXBkUhZRL*AVub4q859v>(Xg_uhTJi8Ob zdlVSrfZvz+Z@&fbb57QZ6{G(0V$45_#gUHuRT}pEq1s{b!^S;gJr7#^mHmZBRWSG! zhm<9eqBMS5vl9Q({{4K_Z{|D&xk*mUAGRbkSWVI;vX{8Ln<9AsRENL4sYiU(q*u)A z-zgG-GX8z5$Ksobc`&qXj!5;h5bw*0#rIY3kjUE@l(|jkz58Q;R*wkDgyMMkeZ_py z!7cyD$FhaIhQWK}zu#+M_ij(ZzQ~OpKHo%to4ua5(MK1?w-sTH8`J2Q8?EWbmD#vB zr3cQg*hbGeID=Hm?!fi$T<92fwD5Y77B5mri;}M&V_1$)21l2X&9#x+OwRl~hT&U% z>vEwb;~ikzY#T{o&q)7brK)h6wYTCCYwnek3^U$TmZW<&(?n&JO~sBytg^PhET?D3 zZH5{d)(p94Rv$HLncYQw%%s)&O!s0bmfMxxj888ut&ia-=61u!j6+R9R?4f+F&<}Y zvzC8|wSKHv%iL^X%1oZ$X7kBmiq&uLF&ka)+sxHBv#g&7cQE+}YMI*_nasyYwk)Z# zm(~{oPcd76L8jFyO{Tr79V^*?BWo-#%jQu&wrMH4z!(Z+FzSFxpy?Je_wom>8pyLNCBciPr^PTgtD z&VDz-M9*^Ag+~&&)QKJ3hqn;-d8-Ha??x@o!M*Jq8E4jW*XX<$l*$#p<15>Uuq~(YWNu{GtAk(Y9XN=Cwv0t8@X?>6+xW5yQGedVKG1?v)vGY3;*$YGOaHNtpa-X{J+17idSx+y= zv%ihAIWGM>*qmRZXXb@ua43ffc9QpyO?}l8_9>To#?rHDHYUM~ zSuphhW53ZUW+|`A#)>ipDA?Z<92uO*NBg!^>lCSz*dD z7*=Oq9*$)_Si6K-IAeuP`oD+P!&U~29f^9(zWd)85>tD&WW0^}IRdb|swNiH6`f>R z3@>J?&D(CZu}8wlcRp*i^LH~tv-&AVtf$PBb$Gxom}y5St}SJ1p3>*qr%z!oH(kKt zGtaR)bbfP{2TeE&KQyvy&&6|FaV6_0p}as<4A&#~IU8 z^|^m(>&=9B8kl#@n^+3cPK?!9iCxq_&Q>t0Vs&j)W#4}t%yK$*ih1d> zAM+h=D(6a27CS_t-DXX!A5&LFo%KVunzg@fI(v$2KKuH<6APaOE3=e+OqiZr4(CJW zK`U3~OqSh-sjP(-fsEUB)|@?S7qL&Td}TBBpB4Mdm=iO-lgS~EKDE-3mg6op&|yQ> zLmc+?Ib5)vVtaD0vIpw|7^%to7|Hg=+<6Yc+@`5g?3rJevio<&u%9=)WFMA%;{-eE zvR%DSay~@T%zlP~jSPIpKEO_81>7@Waubr-p27ysvBo7<^x~7u8*mq+cR-pU^(Bqj zk>zdkZq_l@rz=L*W9kMhZi5&2NFInO1NcGHbky7#ZgzNB_8zpaJgE9lZb8mr5IDLX|rQ0_c79AI&9*6_p;c(6Rg~>{9ye_ zH?msTL9i0MhHW0Lw_sn*Xl1!8$a5->9%a!ll5OU(R$J|5oMwho9M*?FlX`lyhs`a0 zIYwGwKVxy=X6CI>4My_jc*gR;i%jX_35G&Y5##5LeaxDJi>)ixOR>i|t2mc>9f*UH z9q{%?HVS&a9ew?LhH6@#PCeas1G#!Wr#!#dVDY+(RIrl)>MREBr9X+A-DkOPv_^@0?%ANd!-k58h(MlhtpJp* z1%}I{Vaw&S-2DOymF`SHomZEE3T-ny?YlgF>M^}7+}8(9`4a#%wa%lk=}vfrT!V|t z?xH2BO!&DfmTPv&9A{-7LyUnj;j#P0#FEt8sP*cV>ebWd>gQjW(TU`xmli= z(D1@{ov+~NYp2MO3IX{wO`4R-aH3yp&c!i7pW)0y-H?&p2XbHj!4`9i$WDe0RqGN9 
zls|eCO4aeGw%C~1*LIbo%(4W1Z-*!|BPKFlT}WJH8G$i_wQWVBJn*c)4w=PWCe~KO zaW(ZcQPA~9!rtA1Njy|V--dp06^$Qq4ZE%)t;|L=1GIB1zYS4~KEx2ZH;$l_uga0o zxk_l;6AIQj6ru{R0BW`LVY^iCqey^;u0Th zbm0Es6y#EwN$9QpLiv!(K={QTV!E3lp8MrE$e3MBoL4ZVguCJp%Qzjlir0d%MM4zu z_%rBI^hVP~Gthy69QbAbL+;d_v#?d!6%cuQi|}-A0bUoHj*{ps^iJU2Q5vescgc8*a1{Rs1o^$s(cV_MfhZZ>l#kEmzAWjbXJ>G|_)}DZ? z+CG8}qub!w^;?C9J*E(md8z2htD`t%o-^!x*2s~Xww$t(ZA4w2BJS918`!x(4QF3y zM$AA|BQNIwf+UGNGy)2D>j&M2a; zTN9wj`!^LlDvMuj^@j6TW>e?atOWFzSR!=AQCz%rzVO2F(_l7TN0|&<;l7v(VU*`A zsI%n^*jPJEc@=B~Vc`>COZRbMR*4(-e{BhGna_Yz--g0?^#f>%jXs`#MhUOH@Cf`4 z3dbq|%W$YvD&@RxIl?j&z~)+bXQvC6m?5mk+JsXM-+%!Ink`-*PGA@B*@(GFnHu@d zKxu~~(WewgNQnNz<-|9X&HjMqo)gpCY(uc)#}lN_@};Q#`Y$T+51Y1LWl1kIs3SYt zrAR_5hg|S#CC*8W#bs1HJa90B9{hX|RKGrs--SnE<&7WlIpe!zl*dl8Z=Dt?rL~im z`WT0vrcNQBR(yiRw~|3gI-o6-I!U*l8tz?1Yp8Gfh9>*mz%G2R-ksXGe;i~f&%o=%g~Vdahu@c!qF;xU zvFVK*pl~;n5*=2dp2)`{xabZ_Gn)`j`EnWDj*7;kU$y}EURBJ$X#=7j86ywhyWrc; zG?<}!86CRd&xHjtDC)I5Z1I|f-p{^=7VhweHPI)~bTGM|J>C+oDw)`MwV!Y#?KMy? z3PGu>E8C{@?11-DOd!8l1g?sB$mGo?Q0=J*yA9_Ezh&lOjy?yz*v^C8LN|CIMu_4z z11#SpgSXC*hWqV;@cW-`cs#nEs=qFU8nW(yKcouQi2M&-(Yb=n{M~SY{x_I^NRK-G z%r00BVI_>5Lq1(1Ou&=9ta0G8r z$wUDNy7*XVMN{CU-ePnvVj5m?JOT8EWP{P;G%DIy08FA(F~za~EiV#@_yTt%oS06q zRz@JPu$*d|z72q@KL|Y7fl6iP;-7gf;7`m4LgAYeaH)1gI}UN*`!-VU_oG3{-$bry>n5;t`y-)4YbTg&6Gw2@7omhT#o!Y^1x%}1 z1_r&`ICTl@DE+M4$o&3x6mvr!>@uUNg(1&~FjP)4|B2AX_0iP#nI_2d`x3Ne&vIhK z@)NaZSutvv{X{rtQ3}|ywjT{H&g05`s0NqwTexkHVhAvB(GY4pSYycl8%up;$B7P>!q?#8WC9aqp z1<5-AI=3p36%tmpwQ+Z~CxIEyH=*2{P^jy8gv+%zC&qCdwd|eNBtsGrvq#SnDSau#J4p>? zyDtwp8+~sTBxMrG{Q<E_a;7v8v^By&xFs&6X2|# zNWjrfG<~K$ygT@g%O4*AAB}0sq0x)jD(OWE7x`H9E*JcAv7#7J--s_Rl}O==gc{zz z27J$)hM%(66Q(;ZQ``Mz(W!$I;Bk3Z+pc+^sV&FvQx1z=g@^Wt(Yya9_v0uB{dJh& znlAiGnYNUoNC$atQpYXe6Nb@&r%6b`dKzxH(M&`$vcbEs44{-0hU^|{z|0qNaA@sP zuC(rB;%2W8(%Vu8EKB-;a)K+|eR&j+yEbB%S$b{jv$Wxt@J2%RZnxTQ;~7pp?8J#Og5t#HiSJW3o(SHt0v zYZ0xZjuQLE;o0$6oUp_O&$MeqM7am0@l`-{e>{l{(x!qpcL$+=T@BihRzX#sa|h?H zeF4sbaPI4VA2i;ZhdVw`71ql3pqRwX)ag5`Kw66uN-HYooIa}{j`Q?q)CP-NKy%c;&A)L6H2rD{{`|0_^80g< zS;u~$_0|uotl5h@^DRNW*B7cw{sj2$E`_N#MmVj}4sX3_fKi?ooV%=#TWK>3FL)g_ z+5AP7+OeaQs8ySdQ~p|_huRb5jDJpa_3nPGbkv3nU@4QDnNM)z^c1-D-Vp7v-3)GM zVUWw>m(q5jE9ju#mvG&yTX17=GW^f=5u@=?L<_1y=NTZ!Uo_6Jnhu&ngJAs9n8s|V&2m|h6PXX@tVv*UQF;w_fEL0z8A__dOQ`_tMxr&T2 zFz`?hEnno0u3o!H44jIk_UM+PH$Rq9;pe$%C99RXE|US?op;1WDNm^hzX%kyBZ*+b zM(X)!8+esE54Mr#(c$AyIHlfA)a(|TC=3LVqW#A7xn}|?sChlFAp(=JRCft{;ygSYa?)Z}mPKPpRi(3sS z(LtSTC{)8onmnMw`xE%9!#Fn8`VJon7eI9}mo$46PklJ|E!-U8=pEZT;~gRghGOqyq{FOL!9K-7G>U)+`g=CtA_pi)*NE zVnF!1cQ&_EC`RRa43Cis3Q~k^ATyl_WZz zbcGb#?;u_L6X?JN&h!S8PI9N%fF854;vIh#MDsiE&}%G?kt-{9k-caZuXkbzU1hnF z4m#q(%X3>S8YNooYN2`Ddi}Jty8w3w6=fxBYlKubbR=50MJ1 zl3`wG1HIwdC-P230aY(hp#3byh*cjxfhS}Qa&6hfJ@h^fY}}p*W~jD7)t7Oonp=a5 zmp_9Wp1Q*G7xEx+K7`P&uV~di*@66PYVeiU`7q$RDdm$PjRuoTaRfHDTS%k3jn;4-N;Kqq}S!a6{n* zm_H>4$UpRh6_yUb#dj0lcmEe0ndeOW>WhGSyN{!u>gC|VwN7G znc!Wj7fzE6M*1gjfd}#wj`Xbs))ASg;KmI!@Ax_Bwu>*^o!o)E&w3zWY%d&J5QE=b zw#Un~UZI)!7m0iK)6uRmCbqq3L42#uh8fHN91%id&iouaZ+#+YzPSqi1`A=y6*c^( zq5}SGNQR}-^2GA5d%&%m6X^YOJE$D8114Yo0Vjgvp=*pX?RC(L8xh%CLKWh0VU zld}c5J-Y;sPwzp~r5A|SWgH;)bQ_SYIi-;7?!?{IG)i#D!JpQ@Cpqa)!QuaUah{<* z7Vmlk_1@kBW}9hjYVC!0?7dIsC?2GZjx40_={I1+?nCE3?82MfZ^PC1C*bmhcS+WH zNS8nH!!0+yL5pN-vaE9r2>Yi*ACP^CWGZXhOxhB0{q1@xXUY`F-E|JhUWnr!sJVkw zRgVFepj9xQ(T=`vlf{;HD$vd(75&omg~9H1)L>XSmHTQl$c|cv(s#cG_GLPtdfygw z{j3Jc-fRFJpU1(pKmQ@)&}j7UY8v{ttQ`!_DTctqlW;6o#4CR4!}pQhl;3w#s=&+= zs5pKDbJW&jWuGeUct94SuC4*+&)DM!^(0bGe+#zPUL~@13GDoJ9_k1`Mkzmv19#P} z;6zIa8d@Z3{`7S>xHGJe)?R6&%mPAT4ZH-#o5rXs2drTkvlMOJ{g?P~whJ<5Cli*c 
z2e9FXY`9~Y8u!yX8c#h@h34y>K-P61z@=anWN>Oep?h{Ku`RP7I+?ve9S6=~XF>!L zyn>dIjb$yoFNFALptZxQHyIRN%z29!}fPD3v@eP5RR$pqE;57G_l}C0&1paKBtXDLi^@ z3s(JWOd9X0N3Dbro@r}FJF0hLCrXu^)8~NHn*8ts%}wO0vQgZV53qBIB{u9X#yJru zaIL=zz4W;OCWci>gFSgj`qxxS7?cCMUM&N3LcWyI2RP7xT_upf=j<=!_WtDRLDUS+~vF;p58l~5{o$S&4Wd#FKw7`UC5vm z>VvR)-Yej4-9$9{lu#>T8j;ccI1qbS5iX)1qKPx|0F*32Dh4w_vh-1yeL0`2xco6v zYVja0RSCJz(kft6U=mVItU(GL@6h^>7pT7%6|uRJ25SFz4bOYA83h;?z=G$u@YHXU zr@SQOVAm@HL_T*rA<3$ONTnOLXlde3y+Dwg*^Z`7n86uGt)ZrMEvkKy3sZ_71EpO` zgtuA@SbvH@VK0^wm3ejOSG*UL@75s7`7M+QLkV8I;t5vdxq_TR0$e-9hHG!-aWiJK zz-qA~d@<&PN{=C^h#G-&Nh}d}sS)+~<$|X^71(DMg@3=u2OJQM^g8bWrew0Efm;q< zd|(^i@F4*+4OhebwH!iq=RUmsd;}$s*-3dE+5vXn+>OI~uY!Q^+wg7de*DBK3DvG+ z(e776xW^wYA^RlrCjZk*EH$V`7ys_W&oVT~2=5Ir?RhlmKCuL!`^ml~D>fV$+73Z)K9F z+++w-w{j{eZVj+m{+NpC4g~6^4xHFk&0x{BN~(849ep+_13Gi2phbIyfOZ-NMMkNp z<8=wuv3(Y}dHN%AF`kYM2FHl8K^{6YF&6-4Ec!i(iFw3~A=#CusrO%c3HgI^;C%X5 z6x?(P&C*y;6W_X+>~3_c1r_Q!T`+HzD3&6xK79gF%lL!R!^Tu(u%*YzT}%vreX?hK{NDu=ptm zqWY;ltMWJvk%~w-s)c_XUIl+@PC@2_`FP8NP$K4f8gL5y!maN5F1!;m177soiZ|zK zV9#n5k@2!sM3oj>Caw?Hz%UZOHqgc30&YuG}S`|z2BMx@(nD&e%EPu{?-=Z+gSA7I$GAq&NuTI3- z4nJVOtc|j)y+oZ_6Uo}zSzdP^I&7J+=n)XUGv*taaSQ`x99PU9IR5ugz zZd!w#Iyux0kqKIoDFsKGC7|WtC}pUA9f+$8&|8IMVExt==2|NdT&2J0f}avh>3&FP zSC_&W@1LWCkuLbuyb^H5Q5SvsiqMv0CsAAdT+H6P07X5pfj1Yu#hOcG!8xn#`10B# z!1>o|)L3T+1Kgj&lF`dKfAr0;uhSK^L7@5=<=l$>mMZG4-FjDF|Zp2`~3luTU;Dcx*osTDMM@8 zCSZSi4f3bQXW`S|G@f^2A6oXti28D_8lAhd5FIN|gVye9ykOmM`svV3ta3MyzQ5ZU zUVC>LR_)1!-&K0(QNN?Oa>^>W&QgmkOZY9^5fca&?KXk(EF-A=xtYG2v4{+dAAwba z$*@a98sY=Wu+DfT^gdgR;O;WoihBZXe(w$fp9W!((HSfnZXW< zAx&J8QA0K+Q(#H)JY4*F0s18(KxStokk>cBPtBvSzmpVfIyXcpyiEnh+D%A(&RN_O zv6!+vD+6c7l%tl8X3ASG1Ft;R09G#5g62t=(4D9n;)Ah0F#UcU6l_t0FH1{W#cOBd zS10S>%$$De$CAa6vXVwull8#md!Nzk2W{Bq;xuUggJ#WJAPeRWYvI+Z%R#2;4ESh$ z5>~0!1UB!}PQ-Y5{Q2q_moAdSpS!*iMdvMruUzu6XY+jAQ~3ZXy@~?0Tlb&< zLJlwMz+85M5C(SG;ccOjc- zdcB+u?bO!LE8L7oi(PD?ihU05Hakq6^_GG?7fz!g<|gj7Ma$8@z2V@(&QN&ZMG<=6 zFzIWs1(13WN`=}6!*umZ>aFM!5!>qLMVs^Q1XeR;IX63b4r2!w!JuKrUjCVr>MeS7c>$29OOD(5f*Fa5`Dq}q|AsF>X+0| zPv_l2ykax7_Tec?x^)&(mHvl_nMvFoUhlXYorjU%{K3|rE28o4YJF5wtO2g3l>vuk zVW>qf92VUOgO@JKqfFlrkW*HGY8B<-xqrK0!zMp?A=(apTg}6EikF1Wh3nwd5L2)+ z$eLK;D2MV43?bj)5wcJC5AH6N#?|s&$fLf7EKJ-6PhKmdAE+{*)AKScXmut#8UxAF z>I}M@Z`)_^*}{NU_f(^6jc4KC%?@PoqKCBq7C(+{nK7aKPeP%ehe4yr8l8@Oz|oq2 z98EhqKon{_6Uo0!k(JVAVs+*wRQ3G_GGWGp1bP;Abp*CU(bTUKN`V*YwZaOqhPRE^Ch)Ay#+wKhv0pJEir!jE}Hhq9u}CZazhtR z=0Etl$ft#-j?m|kF4L5{vAzX;T-QqN{G5f6^$Y4h$Mx`$Oe6+U1IX*y5*&2qeoL(E zX~J~x26!Jv!K!OwWc=A2EEt%KHEl0~XKlZM?Uy9fcO1YB@fW}!cW65o-$g{Zw{mf( zED^Tl9NHvj4aKqroDeN#49G=q8q4IUnD|TJZ2EW0t3RIFUgSY4HY|ytPDYLZ9&HeFcA<#P6iF6+s5TB>l zBL0+Aiqxyab*g5d;8_MFhWzoRTu-bBqVWf_WOQl{35-_`g1xz$;nqFM*h4Rn=qF1l zBSthZmVF3RpRL0GG_C=ekQ7*AtWG^wi$w;k2=wIKe_Ts@89Z%IILz8-3jFAF5Q_psYzeqmEVq+&>|S!a!OL(DLJju2`qEr#6+iP(5V zhz@QMuo@IH{o@+K5|^=n1UNS#bEj9bFLxB3l{H_M{(V&kse)wR?e=Vg2>-!uwg4z zdvybTFhw7Q-L`~%j>oXW)oS$HoWMVPGq?vjwsQ|u+Q2z?cEE~>Ce*K*Lfom}hb&Es zfl>PvptE=%N=T7~5>Gd<fsW(s{^Xy#`UY!Vw8{dO`PhBaZQjnW)G<27nkQ z;py2!RLkEYB=ZtL*svP4Nt^|I`!hhkcQ0jGZ3&66O<+og8d{_6jp{vwM3g|9Ug*f@wrbfV}vjtt&g_FTHFYc}uV;7$6_UmcOGmIbfqc@LQ* zw}JNNg}uGFuEu=mA0x4={AT)zy!O2Y_W4g2N!teTZoe_4-fH_HN1bXS?Ug@rf9}L>iC5y*-;e{YRmZ4c z9RZNru1y3tsGykdm%t9Wk0A8|2fg{?PJEIvr7r!hMw`#{P%C$D26u`MqR}m?)ZC7-}gWP zOCCntw*ZU8(|`kM0vAb#15aZSG|mYJSO2TRQoTx~jNuE=$vFjYZVW~`hxZYCS}x%6 z%Tn~em|d{(zCSuNr5#*&?T-Wh&ckl8ogmTl8crF~N5d3EkN&%l+{%_huC@s^FO5gM z-Rgy<_Wxl-^~>X5n$~#vHH3n<4N=wG!qKg&)wt~TW$y0ly09f=G5)8r7qLCgCFnX=VuU5X`J*MU6(sRQ;*jfXaa_wmZG zcDC!w4A>jt1h{MbiJI#woSbcwF~{B!V6{F%RJXE?R53h6j_8%pyZX1%k<fO^4uQJ7dh3Y(fewh=`poTJmyEz 
z-l5vOjUC^~-dq>nSgNvU?dS-;_q?36bMGSQ-#@|SfI(Wrq?IhpKFwW6n&WYyB3k`8 z4#c5Qkpd50Sri6RjGUl?vJ-L0s|jhI zP=lqyg+zVYaVQ&5hTf>ZM%_O9&=;ct>Iu<|R9>t?V;}>wU#>&478{{LJi;HRdLUX9 zhBfk-;PX!=Wv`t8rE;2J(gG#C)Jzj>uQ`jJNK7E{rVkYEt4D_xAA^g?W^TRD2!XtE zLC+B-T>R}HS2fU&y38E}1AW&yc5;(goRK-)b2kyZtG+;NxAO;)G7eDpTu9rCT?vRc zYzgBmcM?T&DCjuq0&GVEP!D?$Wer{jUDq42{kt}7ndJ>iPvoFr?oXg;wG>a;myLBc zoW*+o(y{IKQ*eB?7x&y)Htx`$gQT;&F zod=isl)~$U%O~TxBKoCPF5Y^?5_&94hTW$%aL4?UQ0h|*(Ehg*c!g}C_4)dwmh(xd z_Ma8Z`(8w;+NYqfdQIH5GJXW_G$G47OK?*9UgF`lU>Kt*Pd-n( zDhxAgCtU5!LE_(P)E;CG8oW;nZTr80)eh5;#C-u8Z3qR@m95mna%T`3@(}5i@OsCKVCPT~h4#jPCD+cP;%tPPIQgJx znGSAH$w0lES5l!9lQu@x9z2~YOKlkDfq|9*r157C_phcrC?(U-u!jxxJNhGKX7`Zd zO4pzn?pj2VWG^CY^U=@&IZ!xmU=kgWBZdKJ3}){nDb_FZkne{~#u zt=@>f`&|W3`V(PS=l>YG5=SW7I4t*7A&H78M|Ni3_nldGXO;*_hoqEFDoMH&T@q5{ zELY{uog+C)c4wCmNl8MKq>@fP-KFU8`4?v1>$!f9CUa&2=bl^_%C7wLURXB=XC4vJ z^rA76|NiJi-((kH))Z8-3)Gb@w7Zu}-Yb3-^Yo-Gwj0k7s6{JCaGAgEWOAqOvskF)+I|mpo8Mn>T3eM%vsgdpV#pB+UC=#b8m?p4y~q} z*GY>i!f|0qWitC^-C6$jx?QYA;Tnj zC(`Qu#cWaS7JB9Ll~wVAU;N-b`pgln$#lcvV0z@tUD|5!rqG3dh1T#Z=l6b><9F*` zWQQGbCUx0!(RI`!J~Ho(eEwy6hSFNTu-PavU)MF=ts=~9M3&biu7UC&3AK1HRR?%)wXW8{@ zXNt8oz4<>5u*}vBcd>@aDY0+)HnGB)NsJ#ajPc6iGfMt5#kKMmn0k&6^ZnQ&bJc}3Y z6)h4!|8Y$yQ}bzp=Q&H?ak66%RO5_GsR>gfb5fL8KaGC$cOAR<%maE$=S*hNZc~P@ zYQh*a&JjO2lOmix@r`;6*6=?%zviWxbPCNTJeIa%U9oPWt-wtjAy!x2EbiD6)~ftEiB49BXG3-bxBP(BC5pl?s)^Rdti^}`VeOAq754_0XyRG-)lk+&Nhx-A(`jK2wY3wmpvvCXE z5#+|(5UnKqX`ez@F4w0wZ=S=yjB{w?=LI60#xDK_4=sk>7*7i|;`siTA)W63TlAqd zla{&snhyS}&F9rzVb@trXI4$>6J2u17E|Y6(lymQW-wSoe17$Fe#2Tny56Ojt_YW6 z`imNQi8hAffWS^h(@MZb%ywi7FWR&ASGLgar%o1c|IsJ*ufv$1PWOc88Z~L=iS$JdKP54Pi8R%tkDdzVMU84P z7es07y%YuUk+G8M;RacY&#y8CX2rRZq>zbs{gT^)(@FLcaoqunxy2hK`(pn{ss@V$ zSzQH!w45mxLDX)^mfd?K|K$f;6wL{#whPaaytRp!G;bG3j9z9-c9@%4ynm}{VP6tr z(KeAEKR)tNY)`%uY$$v$Ftan_GnQJ|M=D4yuie8UP$*&`pUKs{D42cB3X$py-vYO1U}v6 z?>nQ&MyRUNB$nkSChTccbxpgQCu7uf=9=FX&aZE18R0mxOEB zd2~~)2J`G_9&Pj~j|shPN?(6>P7F7z2~-c6(m&O&GL0f0`$R`&BD!@(Y`;lf68OfG z85v$Cx&+GUtOpy#dq^dg=K8c{Ev-a|B-LgTFxFEUm+foUcndLIL>@KaY}q>p`~~%u}K`ByMoRd z-Or3K-^jd;(i8I(o-mCO70goN1g|XVF&*=Cs@TZfpNX5?${bHu6DX~?%B=O&kQ~%@ z6uP5tj9mFG`po0g{EaqO`I4LFjQ{9$al>J0!SwRQV#Awu;zkQQ3vVA?0rObDT6#ki zbHrF)f<3$;7#Qpl1oxX%*Zql=-0cSwJwVa|vt&$=Po9%_L);>F#WtpBgd@!+7WFB8uY?dM+NpW#e+!K*K{6hGpNei&O0djy!#pt-=9LyzCVXF6`jfPM^YbAALgy)SB=om277x1s@k)ot8^~Jy1*Mshs6~i|-Q2 z9^Oxne7(SaTmDP%XbmN~r7kN`rg{Vd*-j?>U%lk5>Rra6C00VJt`(HqlnS)|%ayo= zLka)%xFkcmkogdBqOJP}Us**TS6ncTzkx)ClMM^W^x^&$N2a1xWM>Bhg?3*fARK zcva>k{o>FP_S&$nNd31WoAvF2sGgg}ZgZ0G0&ySK zzSoWYiLIn7f9i^FEj`CBK8p!QRlo6(^?cwXKNS;+bZM35 zI<{)nTXwTa5P#i~1EQDmwxU_1hv~OZF3>0I4zqSoJNfatox-w|WWL}C*eRLmsSk#=c%4(;w=;+_Vk_)^lS13FZW{`>osI2ni090eHl7Ncjr&z zt^FaSuiE8`dd_f|8Hs)Tk4fLyD8C6$(uqmT#)uMH%Q=m2_EnnIcTg4=6>MkYziZO- zO6Azo3bW{_SXTIKP)A&J$5@1$2Z%CLzp?wyE6~SpycF5H8;JQ~R-$tqO3ab10&#}! 
zN#3zyW9DMmZT|U=DfE*k-s~c+DSXH5S3;S)dOYm0v$%Mt9PQpbnXxkSVrPuJWt|p@ z*-h@ctnCydTJF|ddi#hc{W!vxO?4RKJ8$eVf9$r2Klj9MS}-ucz7AT*lMS?I6z#i( zlXNGsuS+}l>yj_?+Sw*{lJ6Ae?`jiT`?{t@@Z*h=1}zy$W%^#hx2=yD{&0fiNn;c9 z*!cPc|3e7mpTr1?{#z?qOUX!-9`;JS*0eG8qtb$bNu82RdnpT#jxlCIm9)Ug`5jY{ zV<}kt=Z>VyCQ+gw+aWo#u~d+}=``b!=qs^vUm`JE$cSTC+6w;hFH25ZNAO#0beTOq zl<0yU%}x=oq=QXcultv@2^IEciyF1-*t+eF{Oi|qX@R$jc<6@*8wX?9S1P1v%QIhL z!IpLWxIb6uYI~ z@>Jo6i95ba2KY5zjk{9Hiy{aqsRC@G_5J#czeCBqMG?_=d&1v4cx zgJ|&=M*$mlKs@vI#3WJWdWLsnF@5?AlC1k~z-XTV;$uFe%zq1hFeS|q;?1?K!iJ9X zl97>7+RZYW;Ur2+%5E$WWEx2^2jVOlNkA53)c=T?q9-G1tW6PbRPPYOmQM@`8fdH3 zN8;Dlc8Krj%ZO*!-JxgwwvzaS81duOYUnyvnYDX-O*CtD8NYhpT;atR6Ikh-hVbSR zeYPwpPjt)lB>gi`L9E$b%3gkTO0@7{i#heHU9@JvlP z>B8}gbmr|l{Qu|!^y3`B^I1HHKWoH|9xx4LYp*8oE)+BhJ%?}6ryqFWvL(65sl^4< zwdI4{W*tJbPlouOa~P`IB@mMPHYj+HJCSf*6KPlvqTZ6*;9}!!v}IrgrV!YGJvpg> zj;1xCJtt$~y~IrXwqyf%W56P9yjhb3&A$Jo>X&+>PRnc5vV;nF z_&_{zHy$MsPEz&qnUo)KinOs&;AXFNL)%B%QPAP@z!^ zgEwb$UvzjjLwn&f*tU@0i9Pq`@8Kc*6AIGElxro*3%U1Ig$5OmpPrph*q~jssU>D8vQp`}~VI zR%1i>^olqG=5L9ml42~L4Z~8a_rYi3i{RVA7);acBC$_*95;I{0{u_5;k!dKF#XVL zAn~;#3`xC&fB6suMJb2Cg!ve@M|Q)h!X!MGBZXxP6+m6`OAxwVfKP26#(j25qZ#ww zf%>&?@Ll#<#9Pb{^mN@Nx(}+M-@ArEd%}F;y74qTXi_y$O#2Ls#zVklZ!^ffS_zw7 zB7ywckHj%Bik9Y{z&1%KpiJLG&@qj{?oE~utTELnMjWagP{ks;o>v*iA;$LF!DGH{h#Y`3iZq^dU4aX8*N+S`m9XOTiabBnu!QzZx@Kbph`slxz+lr}Cxi`vC zn^ZpeJYNIp6}Y22APKEksiZ89Penhrs*o3K`_Wy+K=?1Ag^VPUSzHMaKiCWs{e|fM%9})gcqytn@eeK8PymVb zX{g!!5|-R|374k2z@xn_2(9!$-}ZW8hO?vb9NGD>E;A9|d5MP`xaJOiF}wI|5K>NqpQ8bym$&_+ozs zvFlk9m@`EJ+bR?>r|2Wdb}k2HZ2AsE_m!f%xDZU)I~5t1zlPDLL*SCqa-^sej8uAi z@vY1D0G&BBvdoSFBrhht_7OX z7tnpz7i6-oHM%mf|G(eZ$2~Y%nmpQ}!PQx_4Md%YM+c17P;=&bQq0rET)!_;RCj?Q zcj!qzI{N7~x;rC`#HZ4N_7<+=4K%hud$qu-kRkQe+IK*=?Nw1#GyIKP32(wf=xhWWg}dD z>nm6lG9TWXtOmUT8bNIPTr_>6jUa36I)32M7cg5z2AuVif*Q77K&oLD_%wVPmq}QN zzN9&ze<6p!vDa?I+q_gT`gcDhWiBD^`@MKg*bT_$$RIjilQ1l>Lpu-Zz-X!u>XrQk zg!W{De*XxIcM}7L7f~>Cr4XCvy&iT2iqL?@Iq;l)2bIkbcxf~T#DwjG#mD87APCMJU|tb{5s1P=mWg*TAQb2}G>UFv=*d=S*9b1iiW{fZ&T6 zR{1>*>HXG4??SEM);pU3u9^tY)K2(T_c+I4Oqx7m%}4$%cIfNX6KIk9V%U@(V9rj)PDRlVy#J*u)L9g6ZudirE4)W3+-+S<%D*(^_v z94(=iozW%TobHgR!Nb&t9XrU#$;D)-poM$+YCiY-N}9T|^EnxH(U?p+`iMMfD#w-L zy`moMZQ$nBsBwG0{6>PlXv(fMkg}Mfg?JvTsqhO%l;iIR%;`S`RJLO>F%({=P~WUGj!YHW;9>SOEQsWx=nZEx<&54H#02LdTZR#Rq3a!snLP;Xmta zj!f}w;FNHcGdjndSlrSK3Nznx?0+VL9^3QS<`|bddFa73^w{fh+kwWPv4xhQd6-`Tj0+ zWY;3tyj&I;naqMq52T^vr)^-3+zd2j{UsDB--3L>Bop5Bkw%a6l>q+o51KrF0c9Ry z@X-?pzTv###9mxKarSN@F8;`!?=-uOIQIP(Hhc0V&^XB!PczK`vR~9;$@Fz##=p}* z*0BcvbJ7yiT9u1E)_(*t&!&Tl4eL0)_wR8&eBxc9-e_yUM?dBe`eq#{l}ocz!AN_C4%Ahr;V`& zTd?JFS10Zj4XB(Z$Jtrs1CI#pU>3aeVnY_FTPTH4`7`+B zo+l@?GJ?P=)a4L$Ypez00SiJfIep!Q^X&_WVf#^hs;dQ1T!M)(O@GKsmA`IFwaDNTR-h{L8ms#S}fHXTAyj)SrfyObJDw@H5zjqcX_8 zqYjUfdWvlw)FLcDYoJJD0$frn=PtRmj`C;>B@Hv@Q)$<^I$$oWPrH!a-+ zc^~FbVf(gH(nY%|wUL$NzcW50Z}<-SX6DSDH`AV*x4wZ~BuXMP!b9ME{xs5i8IQZQ z<_SeDiQsB<%5pzyZbhlF)2WK)IBGIF4?c8VqQ;dCDeo)(c%Ty(z4#YKxUQLxf0An= zjyQXBvPV^j^`pxP&NTr#@jxE7UQdN*jm)4&zX>RI9)p3NtH6`T3xM<6$wc>sYdAJ& z1;d!980T>z@q6`M+=*6(3oI9*CAVh7mOujKYc~?Ves_UhYY~)~uEpk2IH9XQKotHx zkIh^lg16>rg9z*-kXW1|mbP}{;r<$Mi+w1*T(Sw&KI9Rb&AtE)Z7(#SmrM*n7Q8sr zhO0%G5v%*G;r0urC}X`f1T(If4o@cujcHfFMo$|;Wf0)fxa4$XojZa332%`*CgR*7mtr zx_gI&=C~IGN@KZzcv#7{Hfs3cNWfP}ch# zUT`iJGq#(_(SNrVY8G4qf$r7#L4R$`w!xFw7Zd@u{&s_gYgd7#lWmB*8`csF*G&Ua zchlfe<*A%A(W8WvWET;4Zw~QS90k@keg#E4WRdD{ESeYa0MGZ=2J71sz_g($D86gU^UFRCUk>^?@iNXOyXX~k&MY%g?(8;0?9BS@R@zkMlL4@5u)!0{uf%$DhF(yJVd?5hH+~j%W~)FHB&t6sb(sT zQPe7vBh-A+elvNMKr=u8F3K_MJf&*Nr#i|txuv}_W|9L%+`{5LW{NSB%sw}jldCZw zZt!gMD;y;!XXWTY)PEqoF9bZW?LcLCA-T{z 
z94&adlsFXe5qznPK%5ijz_Xtr&_sCw`A%XD5%Ue?p56n;9+;A*`^q?~TbyBOe+YQN z*T$bIiqWAlcl3Q{9BdT(6K18KfL{e4k(CU-PvtLq(RL1DMfRxFY7*)8cK}RXbqXc& z`RH5dJ9x%ipWLnU8a)ct!TWa?fIZhLVDPpr$cI;i9xWD;tw-*lo~ON}M$vgrUM7!R z7WfSG5EE_}%kjt?xqdLCN#8JMjfDW^6uQ>pnQ-eg0B6Zwfg1k_U7GbmpnpDfBZrP!#$K)`P!`+Jg6e4Gic^!*-^jzh${{hiRIh=8Kr z6IgEEAsEuq3v9>;^y|)2xEKU@#S2Q>DhsQ zC~O9CHZ($ND$sS=TaJxtImoe50%f%n;T4;Y*1K(letS!Z<_b%6C22ZLzj~Xf9(E(| zB(~s&=GACP=Rd?0PsdJ}E+)gz8o;;CUw|qsg1w*miLD3Iaof^N+?y9miY6t&e;tm)c@zw>cK(t^cco!wJT!*MNCy)qTqa3_)1#nl@5&6;Gy>Uc_h`|DFekRY!(q61+WY20D@zjw*+z;bG4O$YJqHpu}_% z3vnJEl9`7745q*{ua*X!3*@0`1`g6wfOzz zBrZM!nO8S(V>4ImWzpC0%n{1Y+F&Y$e~kqESZ$dM9@ zU^1os2Gmggjgsrm6Gb1B@Bsq@vP8#|EC`TA>to}<_7%(Ea%V%dFBL~Sg3Mu~<8So+ zUl^Qn;4!w^yBZvw=Lp8syz!6ctl`655y+lzi4S%96K|iqgXt6NcpL5mDr8oJ44(x^ zSMr0i;g%oR=-vgVEk6w2$2yT<_X7CvQ5){Czydy9^%Y!kolV+!IFRxcs_<+;B@kRa z1O*aZ3#wo->x=fVq>p;U^rF%TU(OIGX2QU~Vek#kaYsM}5LX#KWZ)UTn_NS?b5 zy_r^zrsm5~8r|~R ztY{!k)y^VugR_K(;TaUGe+&kcpGEe~%IJ=-I%w1T4@f_aMn?+TL7xK`Ji3+-ia$(X zD!)K%NGlHweLa9Ve~cq88#JMvtqq`|=kNrd@E4_AdVp&sR3RU8O)|aB(e#?hYh)zq zAZA=S3|`Ygupu@d->&(RIA@WL89wKe6{iw_XN3s0UU~wzyX-=H3=g4WYu|#QIVE7+ zvKNrjPoZLM64W+~B4Rq#O{J)NM8#8Sv@t`Fv7 z8hezHtztSV+5QFM_J6r2vd>d0QAg9Fdu0gkOM2!6XEFSNwDJYLaY*A zo#1F@fWOD<;NmWOqu1%p(0C{TWqz*$PRc3J*km119oRus>#G3M`y0`*`M=@r{y>xP zb_&n^g26`iI+ipRy8 zg9dHja5Dy$1!$s>2gk9E4hMm%t36Pkk_8*92Z@lZZ@}uADbOk%h7QRLwtDLZ+N%Y_s8->V&U6>*@CM{S)_MJJS>xvAj)tC zv1^Ymvg}~dme?rB{4jtwa&tj+u`|k2fCbM9KzX+hfQ%{HrWrp&VcYh5uzW!yn)*5wee%|XTX+8kw^jyoe;Ij_i+>bT ztdBL!Z2LpLzId73p16@Jaa_y&^*n)^zXMUt!V%w|9BjF9^q=X3Yr z6`XNVhxGKk!S&cQPL=&FqspgCaq)yyXan;}Y?l!=GxR6n&pf5#-!I@=L_EM`W=6t& z`Q?~%xD;wQ)d3vIMaEQ2G58;?8!J;K6PW^zc(C7W1_LMi%Lkj>_kt zkGD2l@Y(_nG;ASfkB4v^V%|cmpGO3QD&T&rKcRlj{ite93aq^v4encwf=opt)YqYd zR~DO)*o$}+_b3s)>UxPjG~~jH?2G7M?{XyjpAD2GV71e5;|-SZ0~ZJONfX?)sgzm3qZ+C` zKb+h}#Zh;)($S5!wWMdxbn^J=MDBpaO$1i7AQ<$WRB*CFmbpFXWt3LwoULFYiTL3oyn}W(O+(LSRjc~t5BH?*? 
z5Ja_eV8os|q)>ewK33caCAF%s_Toyya@I?9#mWlGddQ#_^Q~~~m;<3#u^vQ5zXyH4 zWs#oTiurabO6ct%PHJB`MO-V}f#f(6;>bQNP&p+728>veCO#iwjQeE#o=Y?F&uJ3c z<9rTmnkS7;&AW%fr&|J%<{)_aZZ8qlH8KADf;<)`xd*!pBzbd3xqFqqBAtDuRPW_Eq;ujE z(x+NTnL3>#cYiA)p9g&7($`zK#jCBUuTC;l*!Jb*z2h>}>73Wpx@~W$XCBYF*#(B& zp1YS&QG6_lF^P(FkWXxD(2M1Aw!m zBepp?#%2BMKrW)-p(Ob ze@uaO7tdqnJHv_W6=AS}I}4nBG>!1Oq6VV9>mc)dfa5ZG2tV@Z0wj0;#Ev!9BgY@g zpy9U~FbtXl-@D|49Y?Pqy$4s&HtPpOYJW2Lcx4job9oGXdh$^7>$s}Feom~r)#UZ=^%MFp#g#H#j8445NP`_TNxhM# zR^4;wo=C{2sxK!{LWP&qqW72JmIftC|Kc06!r(WYHRwpyemur~IbeaU+C&kr*X~w}aAU$EyixlE zgfpE`$}$Cx$MR%!|L`2(ad#u(zn+1+?Uv#P#;qU>wj#=^Wyo{Bal|{#GpK264Z1Kb z9gcnUMbiIi&aW?+Lb#lcK{v0k6DFM#zL;J5Sgl_3wpH z;u2xH_{dtqDSsE9HoXZm9N|JO2MP(@)^aXizE9*YpAWa@E+r1TNr88#4iN@hi(%vR za?YX3pTzsqQm}Yw6j+v%iipRF@GAm?!OCk-O}gQjZLDY zP8pMtdu6G_LMJk1ogt~dDF!`Okl`jq*u$*&N^(!(Q*xm95}8zELK3v63Lzbjx{c*-abWNc!sV9_JyqXfnXrP$>LehhCiOhd_ns~8d zGNo90hRiRn;p9Ct0C{fjiLgIAuxIyWiO$R_Q~MYed{8-xjnQw2mDVO;O+qc9TmBa$ zMZE%-Cvbn@oGQ-8OAm-WiWMLyMhu4!nFI1)HeL+Q5K8sNKtvB2 zc2WqI)3FQ~2h7D+CSJh)Z97LOt?`FB+Mn@RW!WHrxh{snsXZ%r~Xr+hI*i?dBNTc{BjOm=uM^f0rYsnZvoMsYH&&p2J$k zFhCvmg5RI3;NSFcnEdNG9?j*G^cF)Px8e)(gBQ{IcV|(>x_UG;^*z`lzXZ%)69wh# z649o0!|-!eA|BPQOw@Y`LFMdFsNxcdCePjqw`KLCmaCtzou>DJt8pp7RB|wk?14){ z%8^U=5@hMFPkE`{M`Th05vgiMDc(1R`{P~U_y-60CNd8N*hr(I3H=_0pCdD8(PYy5 zMza0-DJZ2;1{tYHGSamidAH7iMI}7AVtWxvSgcBRP3|V2huD#Y{<382lR1ct&IiJ{ zRY>#87RU>H4C*S)$^K7C=#RSr=KM*4EGxDLd-nxGm9_tfvU@G2^L0N0zgHA^9b}HS z_|qJxL;I2K(kDneelzUvMd*+H5az0@0+P?a0rmQ_WOlYTx?=f*Q=CiS8PsOjH|;qF zwr4{3)?(-|_+f%$9VZUUWnodmM3h!`*EG3X2WT$Y3TxITVt&7)P^@1k5!!ncxQ$MR z{hMbKPbXEuQmON}Z^JpX*jx!5wx>{saoB%g} zUx$5|^_93f>j79>fpE83X@twc2*UeY2Oxc3V|BBBgACUp(CFez*v=%;l*6e&-}D{P zHYo}W6)zxOyILW#bTTplDa6^f5wJFK35aQ317AMvLuE(aK-KtKxV+g9d86iDxZO^WF|VjrvS~1jWN5jb_REC#YyUt$vU%J`zom3JY?3tSA()+mCcrf z@8X*I%9*Lj7@5siImVS@BF*514C>C&ddi;JWR|y~-R$C`e9Etlt%nlW4;^GC7VC?b+;?dnW zu=(gHwz5+LyUQIX^xZpfjmkw}+D`+#zi5E)_1FyQH#t!I8wFM@iN{wy)4+qqb>J=g z5aMEFEC~8?84OxI$HKYi2!$_3XzTtk;(b*YsF6#DS;;DR%M)w3n&~CFX2)SW?N#8x zJ1?Qjw{7@0g_ERGeKbsZ{SXF4+5k#c0c>oUjdtHE1gg|Ja`ve*5Ioy}{IRj0B@6aImP6+pEw2%S(1!{oG- z(8!Z5Xj54h2<0(k@~tbx*nD?rUVa-&Y_gzi7?0#V6p$J1M?hg?D)@V|5jMW_M_bY* zC?Lq4Jg3}@q=SRWUoB5e6HgAJ*{K^rv@iy@UGT^BweQ4PH!BBrC>EM2J22ewxU19= zCk5^r*$Y(bh7>CMLIHW=LFom(HWmRl572-m-i z=C0{S+#P>d>~4(?TAJ8JXde6k!n?D3N5Ef_0GhjW zpjEdtavD}AA|-slJy!y%L%TrY^$S3e^~tx0!+#O7iIVp9MIv^eZ~vVhBeMr^Aeq3ZQ>1hq!;z8uFhU2M_9{ z$TJyX;Nz=AG`w#Wc|5lm%yKC~Rv#9CD_4)>mHS&z^j1sq)2pvAMDYtzcWJ^$WNLs! 
zG7&Is#0GAP?L(*Z8SJoTAyKqe3g#-jhd-jLvBS4zi1{5UaBM>{CJHYFb4LIg9d8D> z`3hp6Y9`l+t^X6)EME+dE;x$jJ{v*p5>v7+@huY8YLVHWFL8ouexQSwmB5h|nfOi1NMiGk zv#6IyfN_DIT)p>p)ZgYI@5h-~Hmj zirW{Fe8yxlZk;NDYiuAxUOXV{Rc>G=#?8dZPzWx$QPAKx0G-K>*oJ#0FdrKvHnr!2 zw!kdl@o*(^XT1yfd{79r^p)VsscnSOvGqhPTn8V@mqT|Z68{&!0LTT}fuM*Jgm{Yv zLYD8L>(fyDGB=B;@cBZ#K5PgZ*4}^{p*!B9?gx{q*CM4+V>~u66Wz|4@Q@h`z^0qS zV3VON;gMMhc^)J|zbxz&m&MyC;~?HUD+Yn1R6PA2Hr zn+|Zwkiw5OJq0ce1S;583-3jBayPW@rSxo5$x6qY+^{Yw%7nhc&3_wC+PXS&ogaRp zZWkWlzR5`Bn(ewu&Gl9_JLfh;mLvlz>SP7i^MH+6S7A4K#m$2H^;L#aPHdpkLwIIG zRyo}FoJZWD?z`MsReI!R-^pCR|7*Jruqcvl3rHA7;t&OtAQGm#tE#&P7>1yzAR+=v zHjM)ef+Ph*L03T}tVl3|n81LTT~sod<07JB&LXD8ydo;%>w#r|)2eU(_xL_&s;lm; zIk!)pdwaTj=Gi-{?RF7*9%n_Lv{2w>A56l>#d4rGnMov6rGWapDR|bblia*Gb*Sxq z6Aw064)|$H@psB=frrK+`1=fH!vz5o4*s>^q&AP?(Kt9gNax>!4j)T~VLs~GR&mhz=XcaN?Y$Q4L z?SWF`S)%BT}cPi z)~-j~^fP45mOOOKsR3V|c@J>UrJ%KGX~gKNhcMH6D7kIuX)pv0fELv@Ft>IVnH0B* zJNs!R956r^Tum0>i*$|2=6gP<(_j^}AD;u}20aB{wTfs&c_}Wt$cxlIlZ`5>kD^h# z3ea<2Gd$C94rLwjLX$*t$TLosTvJkpvd&%vcZzdC=b&|PUm!plhZZ2shE}qqI1f#H z#iC}J`&%}@vn3~0=|Wq@Hq3ed7Ln$d|)X$P@S7 z(T#YVJV%_M2VE_Mn{G`bho{^}3JwP3k$Wm+@JT;%_px;d=B}VSwe{%N{C6aZOCj0u z9w_IC6}f>GLk`@kOj$fnr|(=JLMb2o07d-wBR6tn+AxPdwruqoT4e9gn$&`@! z?msR3w@5`q*pN`>h5@E?L<-zoqYM=yqVXrICNtY*^f;43FJd2i4VwkmpQ> z;TM)4f(o;hpmS^+ciFH!csKJI@!^#^ypU}{c-`HDd#!CCqS9gr7|;l-vuubMBX9Ud zKqGU%VyrQ9BcxK-64ApEC&7x?9G)i;gdsz?HM&_GTIkUN}UPD^AACH z`J;G2at;3Og%4p+>H^b;^syYgDF`2Kc>!-XK8o$nXeRo3KzQKyF2ZG=K6kZoIpLFP z4Vh2=vE;jNCw6uXhD+Nt34cv7YCA?@eGSWrHwWdgGLOs4czl|7q zFc}=;%z?WN)HQ-dD4QZvFgP+{8r>Eeb_l%<~~t+qXaysQvG zE*sKFotQ1AcW!@z`fki52QM=rjUNx7+MnmrhR6EQCWcz{OGk`O(J(|IeMA(NGM;*@ zTnZyM)l;&e3u*ml99%vml{gqXhFEBw4x&!Ff%v)%%czOkplP2raY_(Nblx%t8*8Kq zx3g+Qj_)?`de1VDqppE@=l12A2D}GYMPE>OvILjzFaWtZdW6onUkLe2Z(-mXIoP~S zmuOG{K(?)hsCvPI&H?$*wzLx)KY`0!Z+!(v2v-towyKc7u#2H$%P=&i@jW~?eiw1+ z)=p5`ZVV0QUxf4DFT(g@Eu@;AOj0|Xz}1-PNDveNcDAnq;p&&*xcn?~z$t+K&{%{U z4wM2T+ntfONjzA!@ILH_(jZwD0dPA$7)}o;0;XUwsS)9Y70qx!2hX^Ia_(--;AIp! zb;KC7G_?j^*gX(V*S-kW!$&Z)gNw7L?;w@67NUr94f1`ZIrCnfC#p?pN3m{BXmf}$ zI=!Lmvq4=$ryrRQMXo^)Y}Io~`7>=VQ@?Lm0Jvhz+h#tW5HD>3{>; zwcLwml+d$N(y(>oF&O#5)QVqwk{;Z|rk}l6qjM)7p?oa->2{|;Ds)5%uc>1m)#kp7 zwl3&T-%{#N`@gEh5HL+?>$AMB`fCk{QKkm8+Vt}7V_ zRPYL8A@6#ho#b_`W%Qi2%V<8Q5=D#F(+}^J(lU$2;rR#hNUX{PgEN&;Ez1|m^HuS3 zr+q}-pKO@rz#|9G1+aPdEp%+TK6ycLGvJxB$ODG%M0(8uqHD^2LU3{liW!-Q?!R4* z&(c{B9`5AB3R)dLx}-_U6wg69m?7G^nFCX`i$I3*Xi^8ii)o`&sBmxrG4!`gt}w|G zU2JpUUR*1SKWh#F8cKOs^Q>_Y?0sd?Hu?n=+-}8W4jzZMIHu^gOJ1-az7#Iqp+yKh zw&SfjaWLGw5sp!L3(gg&;Whr*1br%)5RHEfaw5|~;k68KABx)@x_q4LJsTyP@mXTdKOdg8i3#K+DYuqT7Xer>+xK|8kc=q0^M(V7>VeLMNye3wU~#Yb&<~Txr-q6u+s!bD#>i_~&Rm*EE6Hx8fRU z<@uv#7IJ9UwYoHlAxDu(kj z-0?v|U)-my0Nv$;NyhVtV_g#&_p7jPZM`{WB};w zyADsPk+t+z97XI~xC8{ScMy^J`Q&|ZA*@-Hgtoso!T;EA392{0hpZ?aI7`QpT)y1| z45^bMFEw?LDHgO0zfA=-9`cw-lJl;&+_Af@bbQrF58H2EP5t{7SL=3Nq zMyG2%$z7vTaVt#)`18OgcqkGBmT3CS&Sj3Q;58jba$5Z5=T5n0|5_N38^=2~G&<`G_j^S_5g);XpI-m`4 z6Uje5)Q~m0eyC~l{kA4&wWxZ@;%U>itv0%7Peld>TYNAM!21)b5y3$9{ujp_c>fr zYATn`Wj&;3V)-c8DUi;6HJw+ao{PWwScq;!j|SJU;i$c6I21ikv54Nf9-6$8gL@jS z5PQ%hXu+KKTfNx~F0@Od`2h!zl1>znIrSB;60{%sQyYnUcDgW2+8+8n^#|mkTI|vx zB{KilPSmUNPckG0%z^UP@!0w^@k~r(EFTWF=TUKnk}`wILSo ze*q6H`je}`odg_LZ^5Sx-GcWOW#FxZ5YOLM#GKJJ;r@Mp$7e?CfPP2@?8@DNt7^}{ z+S=q{%wtIvEvP}ajqheNvOkh%OIwIz?v|A@)oQ<>u0Ir z%EMM&Tn1dm&BtQWRw4Pl`-y#jE+pot-vni&&*SspVOXTO6t|dFhQ^JoCR)?xp^rQN zMCdT0^xKpvfPQbg$`-r!KOEm&~? 
zf$iJ0pk2N_Do+>>2dhtk&&wMjyuF)8>QWiXY*2*$2RQy%N=| z(t%tmv*1DvQ)*IBE*xf)NtGU1h)N;j2{fM$crh=6GL1$3>MGST+dj8|G%gc9DUT0!QNWPH7TBcV!4JE)SG6 zJ>e3+0_eB14xf#e;M;g!c(LX-aOz_h*mbHDBt%aq7VpWy#4S?93K0p*-*|#eZi9&X zA0`mdVn?h?l>~m1wqlWEyfNjze6Zkk5%f$ui9OVt1|JzN!Ly#Ff~xEF(9NgavTWoK zsPoi-u&6UfyQK1nyEa>3$~IegdHzwDe9aVAw#yL198VJRUbkW4&^(}K(N1h0*GMeL zl4Y*bxMGBnA{un>7hFqeCvLKEC$xFgN?dqdKww7ts90(^ez3g-TD7^N6E?Nnmw^LO zzH9`MLOmd|*X;r`yzgN7Pb#s=&k(m&Jqv|&o+BDwKY^XIH-N48OX2b42{19y7!2KH z0PY89iW<%}j5ea?|h<%Oh?c@#CEb^$#?z90Qk zD~HY$45t+rsgMIRDk#CcdDM*(0Xi19n7W$dNGq(qj8_S@(P@PQTz``<@IIXcbXH|> zrIiBk)Van4)wTehJ2R2+-);ssIR6Pv4a>o?rE+j&O$Pq%DF^%IZVH|}42Sn>6W~fW zS?v6EdBQutk|=rRg5%k~@Jc`zn1-bSd9N1)`_&A*vsnr0tZ%h++o2BX=on0a^BecZ z{(5AI1rf2{8Q8^_EVNK_B|d>}fd@x?;67FCM^>14gK*DX=o$A9q`A=!v7aWOlW{Dp zY4sjrx=|y_i77(iH%nlUe<;}Uq8(65T)4(!(`kH(c_eh0ugdKsuwt(58h{f` z&2a6hMQCpobDg$J0jez>hCe+ek2#Abph=IHqkdBe*r~r1xQpVz;SWVn?f~?E=J277(NCClH2p_ z;aSX*%2n1udTX8_i~E~N-l&b_(2`eZn1M7}BLAFdIH2xmoSgOiBVb>Sz8QGuk5LAF)R-A#hJ2Ch`>L7m1 zeVrwKIhF8G$^{+zH{gpyGVrbcY~oDW0&L=)RIv4|0^F6cflv!u4ZP=F#V?Om#M4)% zfseoL2W^(E+(MZ|Y_N_m+-1tL73w6`>|Ll>eKa|_I>FIg(4XjErCK;wa9!7}GHZFV zUD;aQD*0u;HCt{e*@03uJAF5KyT(AiZR>lRYBM>ln!NRQt2<7e73i7eSGNh>Y*zFS zt}d<^QWLt{RdD*ij_RXx0&6I{LR%4Ey86~{QPrZ1$g0d!`vvPSFRO8_n9pBpoKbDI zdSunR&gWHuX^X0C+!OhyJ6~5_ym7PgWx*o>s`n``TX(<8H1-`o@!Imr5Rb4*bYXhc zvt7@swmT;B=bQKl`bSKydR;i8x-_=F%IRYz|H^dznnkX+_%|ur>fQQpsvlMB+rr~5 z)ssC3S2sLeU%6$|K*5qw^{NSIu9gSw8gVGh35W)8^W&OK)rQT<$vDEa+E1K<-TKq=zPTH$}&5 z0}P3}cAUN?1iy+ahehQI2O zTg{3&ysA6d8P!vpkl?J!WPSsU39=H5s@A{w#ctJw)m70__BDaNq}@DSOFJX|6M~ne zJNVAyeyv^`(zixiTdMj;aANhoOSN#~Y zr=8~Q-8c$4hb8mE;@{HG%H?@)_dTN98(yH78*Hct`7h|y$}D=;?M(W}iw>$^^KzIY zT13Tny7NZwUqd~5Z$iJ)n$HWA!%5eu3X*CmptP>7KoM(Z(<6%B@bnL0a8AQfN>r49 z8jaVId0`JxnZ^dF7O;UlpqL3a1ni=_!X2SsNglalX)>BIMhE#97$E7O^{CQq6)C+- zhCH9Mk$PCWjJYQ@kV?Ngj69r=p}H7(o{7vuWcb9EqWWy6BXmZfIltx7!kaFXY=kD9 zZZ?XVgdIkQE9-c>TUOZ)I2_K;OE9uYyvVh+o^aJ>KPG2u<}%#IicYp|oTbO_XlC0s z78}^=oJ!%Fb~@PRJy~QkF@1-PLeo-POP?R&3i%cH$*8$)y9_M>8+Yvn%l#OnmLZ&M1z ztJg)8U@ld3K^4t1JjwGbR3qi*IP=CDI`bM^X`Z@UChayk1$B%$P5H{6r~UT6CR<`x z(reUR>9?C2$QOyW)EK7%-tgp^bkzEr^h2u$yzJ@=DCE8cZ8hGR@>gDjSgx1o>+6s5 zw!FDvS9q zE4yPq@yP8Ou}@H~SGi2>Y3_v@kIOS_Ei^aOI@~F?QY8A=G+vM3g^j#uJ$18_RjpzK zFMzk(3VobP7YmYYcBW)oC9iU~$-j2U`T{OvvtwX`bsxNy*SSR5COyv4x<6N!_hhcQ zjh*HYo4NI+R$eW~tRD71Y5n%zZL31-U~5pAVho7EqI+147Jl~xh*Gp(ZKj#+CR z*RVksPCLr)OSD_7er$-8l#!EE_m7d&0ZY!7;%)3g8MZD z1cTP+7_|QhgU&x=(ES`kzt1q}{WJzPOP?dp9NTDdB=f=zII=xfZbUylP=+hT-Yvt{XBlyHq8WO9i-v5LF-N|e;&BX* zO*jfs;`rF`;5fD+YamCzXH|TgrRpqGj&jd{UYq{|?tf19?-KZbvl7ghRb%qys`+}w zbjR143B(|d0u!xZu}~rcX1xu`QaA5yP)x(3w?Q+Zme^VL?vU7Vdm9ovthXVt!+RSN zJIP^6k!HwCaM+&+genVg`U=D2B`jxFbsU?+f*ciLq*xRi9UeV}SqJfAwkiur21SR6 zX9+{XgX0-5B;zG2CQ2L~KO-uR89*@u62y@)!HhUC;-bl-IcmZg(?YseNxXQPa8{(K z8@CQygT>>h#))GSB>w-B4d%u5W7zjKGrMiR(SMC8py$uOz9C{lP(m3`uddW7pcZbAo zNN+=8=hWMf*bVJ%^pdSbB8kI3m&D=!B#F-dEQv0h&$85r&m__HrzKH~HS$}suEKKr zTg;gZ#Sp3g-^!`UNTgbl*-+F5JiJnUM$LWzziKa60Np@5ATl*wqmr9g! 
z)c5%5{s})vf67mfAMw-kYkvOMDM^X-Z*!0tOYf)osmc(@tCv8C36o^Um``_1;P|ms z{G7&)bQr zs}A(o`t%euzC8tvUr#~f&lEJwkN}Rf6|)KCNLw?TaU5wIW;326&1W_fIMTLkdDcXZ z42inw@GbC)f9FU&^mjcAKgpaw0zZ!9^HoHZKg+Xv^e2u#>-mvyNuVSJ8xsME3Ng?n zl^ugwQrR(JC6yh6@;{Xw6C=q$CODFTOjINTnQ%x3PV61{W%&Ctof?c`cZ|O5{9gMz z`TILv5|7<-Iq7?HDf~n(Cx0rJq94g+(ARSLU#BZ2*1ye9W-QmAmdo$O`pZ5(lZAsn z&BDs8kWVwJB1_DXi;syHMY0uHp&Ys3m>JRW-ACA!v7GX`W!UGI;s3Fm`j;hB8b&as zVQ924Vx}l|O3z8h6prXC>KizPh@xPR41TSJfPKeG5k)%xd&61wO3*@=FrWYd`k zf4@veX5U8il?;Cy8~vj-{?Ai>h3zM&MrQTzr}+xmcc!ua z(KM4d0bilf_zTTvmBhDU*!+9{3GgTU^MB<3TP2YCXRrS|(XsuJ_lX?;ucFiAz4z63 zd#Ap=Fn?IEd?`zkwxmWD_avgw-n6grgK8%V8OrMA7F~7ZaMoA;BO`Q n9`*aS({5q>ky8nz@E%B0KZW$Ej{XZsp6ym1bI4zhPVdnhdBm$ zf=vr^^a*w4U<4X72WSk7>4h)l{2~hcCHW@CE<^7br zEGi>qOjw@+)P*a7`hWrK?z-tVP@@S68kxWv6S>#U5|t4(#jlYSW*f6c1B>bZwOej7 z7>XjrCL(anh%;y{*dVbLH>yNsB+T*afu|&e2^)dh7)_J7I-7vnEQr>&39RkLNv&0) zGQyVlwLzkB0yr9VZEK%1s7WAvM<59xMQ95+K5rkJ$iXEeNkoc+#%BvJI9ArY%$Ug~ zBT69sKx3t)7ibrYY2LYaObkGf%FMt3W*nR#CIu2;1Q7>5E!@Ds$Xw3Q&IK3a(|;q? W!^JU^iytI}AB6vI=vFCpzYPH1UtRG4 literal 0 HcmV?d00001 diff --git a/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs b/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs index 382941d9a..299337cde 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs @@ -84,26 +84,12 @@ public void ModelWithSelfDefinedModule() [TestMethod] public void LSTMLoad() { - var inputs = np.random.randn(10, 5, 3); - var outputs = np.random.randn(10, 1); - var model = keras.Sequential(); - model.add(keras.Input(shape: (5, 3))); - var lstm = keras.layers.LSTM(32); - - model.add(lstm); - - model.add(keras.layers.Dense(1, keras.activations.Sigmoid)); - - model.compile(optimizer: keras.optimizers.Adam(), - loss: keras.losses.MeanSquaredError(), - new[] { "accuracy" }); - - var result = model.fit(inputs.numpy(), outputs.numpy(), batch_size: 10, epochs: 3, workers: 16, use_multiprocessing: true); - - model.save("LSTM_Random"); - - var model_loaded = keras.models.load_model("LSTM_Random"); - model_loaded.summary(); + var model = tf.keras.models.load_model(@"Assets/lstm_from_sequential"); + model.summary(); + model.compile(tf.keras.optimizers.Adam(), tf.keras.losses.MeanSquaredError(), new string[] { "accuracy" }); + var inputs = tf.random.normal(shape: (10, 5, 3)); + var outputs = tf.random.normal(shape: (10, 1)); + model.fit(inputs.numpy(), outputs.numpy(), batch_size: 10, epochs: 5, workers: 16, use_multiprocessing: true); } [Ignore] diff --git a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj index 58c176e82..3910eba1c 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj +++ b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj @@ -65,6 +65,22 @@ PreserveNewest + + + PreserveNewest + + + PreserveNewest + + + PreserveNewest + + + PreserveNewest + + + PreserveNewest + From 7cd829288de2f04b701ff03d29edb25a4d151844 Mon Sep 17 00:00:00 2001 From: dogvane Date: Wed, 12 Jul 2023 16:58:25 +0800 Subject: [PATCH 089/182] fix per_image_standardization run bug --- src/TensorFlowNET.Core/Operations/image_ops_impl.cs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs index 0ced407a8..318b8b142 100644 --- a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs +++ 
From 7cd829288de2f04b701ff03d29edb25a4d151844 Mon Sep 17 00:00:00 2001
From: dogvane
Date: Wed, 12 Jul 2023 16:58:25 +0800
Subject: [PATCH 089/182] fix per_image_standardization run bug

---
 src/TensorFlowNET.Core/Operations/image_ops_impl.cs | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs
index 0ced407a8..318b8b142 100644
--- a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs
+++ b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs
@@ -102,11 +102,12 @@ internal static Operation[] _CheckAtLeast3DImage(Tensor image, bool require_stat
             {
                 throw new ValueError("\'image\' must be fully defined.");
             }
-            for (int x = 1; x < 4; x++)
+            var dims = image_shape["-3:"];
+            foreach (var dim in dims.dims)
             {
-                if (image_shape.dims[x] == 0)
+                if (dim == 0)
                 {
-                    throw new ValueError(String.Format("inner 3 dims of \'image.shape\' must be > 0: {0}", image_shape));
+                    throw new ValueError("inner 3 dimensions of \'image\' must be > 0: " + image_shape);
                 }
             }
 
@@ -965,9 +966,9 @@ public static Tensor per_image_standardization(Tensor image)
             if (Array.Exists(new[] { dtypes.float16, dtypes.float32 }, orig_dtype => orig_dtype == orig_dtype))
                 image = convert_image_dtype(image, dtypes.float32);
 
-            var num_pixels_ = array_ops.shape(image).dims;
-            num_pixels_ = num_pixels_.Skip(num_pixels_.Length - 3).Take(num_pixels_.Length - (num_pixels_.Length - 3)).ToArray();
-            Tensor num_pixels = math_ops.reduce_prod(new Tensor(num_pixels_));
+            var x = image.shape["-3:"];
+            var num_pixels = math_ops.reduce_prod(x);
+
             Tensor image_mean = math_ops.reduce_mean(image, axis: new(-1, -2, -3), keepdims: true);
 
             var stddev = math_ops.reduce_std(image, axis: new(-1, -2, -3), keepdims: true);
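The hunk above replaces manual index arithmetic with a negative slice over the shape: image.shape["-3:"] keeps only the innermost height, width, and channel dimensions, so _CheckAtLeast3DImage and per_image_standardization behave the same whether or not a leading batch dimension is present. A minimal sketch of the idea, reusing only APIs that appear in these patches (tf.random.normal and the Shape string indexer); the variable names are illustrative:

    using static Tensorflow.Binding;   // assumed entry point that exposes the tf.* API in TensorFlow.NET

    var image = tf.random.normal(shape: (2, 28, 28, 3));   // a batch of two 28x28 RGB images
    var inner = image.shape["-3:"];                         // (28, 28, 3): per-image dims, the batch dim is ignored
    // The product of these inner dims (28 * 28 * 3) is the per-image pixel count
    // that the patched per_image_standardization computes with reduce_prod above.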

From 0cc25fbc35eb406c4f7e93ae9894633c03bfadae Mon Sep 17 00:00:00 2001
From: dogvane
Date: Wed, 12 Jul 2023 17:00:16 +0800
Subject: =?UTF-8?q?Add=20a=20function=EF=BC=88get=5Fclassi?=
 =?UTF-8?q?fication=5Fstatistics=EF=BC=89=20to=20count=20the=20number=20of?=
 =?UTF-8?q?=20label=20categories=20for=20the=20image=5Fdataset=5Ffrom=5Fdi?=
 =?UTF-8?q?rectory=20method.?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ...processing.image_dataset_from_directory.cs | 32 +++++++++++++++++++
 ...eprocessing.paths_and_labels_to_dataset.cs |  1 +
 2 files changed, 33 insertions(+)

diff --git a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs
index f42d12cde..377ac4de7 100644
--- a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs
+++ b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs
@@ -8,6 +8,37 @@ public partial class Preprocessing
     {
         public static string[] WHITELIST_FORMATS = new[] { ".bmp", ".gif", ".jpeg", ".jpg", ".png" };
 
+        /// <summary>
+        /// Function that calculates the classification statistics for a given array of classified data.
+        /// The function takes an array of classified data as input and returns a dictionary containing the count and percentage of each class in the input array.
+        /// This function can be used to analyze the distribution of classes in a dataset or to evaluate the performance of a classification model.
+        /// </summary>
+        /// <remarks>
+        /// code from copilot
+        /// </remarks>
+        /// <param name="label_ids"></param>
+        /// <param name="label_class_names"></param>
+        Dictionary<string, double> get_classification_statistics(int[] label_ids, string[] label_class_names)
+        {
+            var countDict = label_ids.GroupBy(x => x)
+                .ToDictionary(g => g.Key, g => g.Count());
+            var totalCount = label_ids.Length;
+            var ratioDict = label_class_names.ToDictionary(name => name,
+                name =>
+                (double)(countDict.ContainsKey(Array.IndexOf(label_class_names, name))
+                ? countDict[Array.IndexOf(label_class_names, name)] : 0)
+                / totalCount);
+
+            print("Classification statistics:");
+            foreach (string labelName in label_class_names)
+            {
+                double ratio = ratioDict[labelName];
+                print($"{labelName}: {ratio * 100:F2}%");
+            }
+
+            return ratioDict;
+        }
+
         /// <summary>
         /// Generates a `tf.data.Dataset` from image files in a directory.
         /// https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image_dataset_from_directory
@@ -53,6 +84,7 @@ public IDatasetV2 image_dataset_from_directory(string directory,
                 follow_links: follow_links);
 
             (image_paths, label_list) = keras.preprocessing.dataset_utils.get_training_or_validation_split(image_paths, label_list, validation_split, subset);
+            get_classification_statistics(label_list, class_name_list);
 
             var dataset = paths_and_labels_to_dataset(image_paths, image_size, num_channels, label_list, label_mode, class_name_list.Length, interpolation);
             if (shuffle)
diff --git a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs
index eaa762d89..232f81eb5 100644
--- a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs
+++ b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs
@@ -9,6 +9,7 @@ public partial class Preprocessing
         /// <summary>
         /// Converts image paths into a dataset for data processing.
+        /// Typically used to read images at prediction time.
         /// </summary>
         ///
         ///
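The counting logic that get_classification_statistics adds can be reproduced standalone, which may help when checking a dataset's class balance before training. The sketch below uses plain LINQ and Console output instead of the Keras print helper, and the label ids and class names are made-up example values.

    using System;
    using System.Linq;

    class LabelStatsSketch
    {
        static void Main()
        {
            // One class id per image, indexing into the class-name list, as produced
            // by image_dataset_from_directory's label list.
            int[] labelIds = { 0, 0, 1, 2, 2, 2 };
            string[] classNames = { "cat", "dog", "bird" };

            // Count how many samples fall into each class id.
            var counts = labelIds.GroupBy(id => id)
                                 .ToDictionary(g => g.Key, g => g.Count());

            // Turn the counts into per-class ratios, defaulting to 0 for classes
            // that never occur in the label array.
            double total = labelIds.Length;
            for (int i = 0; i < classNames.Length; i++)
            {
                var count = counts.TryGetValue(i, out var c) ? c : 0;
                Console.WriteLine($"{classNames[i]}: {count / total * 100:F2}%");
            }
        }
    }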
From 68772b2cbdeb431a432617e6a5e8bc5e2b2ed754 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E2=80=9CWanglongzhi2001=E2=80=9D?= <“583087864@qq.com”>
Date: Thu, 13 Jul 2023 22:51:49 +0800
Subject: [PATCH 091/182] fix: use git add --renormalize to make model files binary

---
 .../lstm_from_sequential/fingerprint.pb         |   2 +-
 .../lstm_from_sequential/saved_model.pb         | Bin 755111 -> 755111 bytes
 .../variables/variables.data-00000-of-00001     | Bin 61038 -> 61038 bytes
 .../variables/variables.index                   | Bin 1373 -> 1373 bytes
 4 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/fingerprint.pb b/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/fingerprint.pb
index f6ea8da23..c37cc37bd 100644
--- a/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/fingerprint.pb
+++ b/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/fingerprint.pb
@@ -1 +1 @@
-[unprintable binary content]
\ No newline at end of file
+[unprintable binary content]
\ No newline at end of file
diff --git a/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/saved_model.pb b/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/saved_model.pb
index 6fb7c3f0e8e4a35afa38cada60f78a6097b84501..618c800eb45f0481ad202d25bda12a86680eecc8 100644
GIT binary patch
delta 2303
[base85-encoded binary delta data omitted]
diff --git a/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/variables/variables.data-00000-of-00001 b/test/TensorFlowNET.Keras.UnitTest/Assets/lstm_from_sequential/variables/variables.data-00000-of-00001
index 83f2a2fc81958c3a0916847f3ca7e7db48524b37..ea67db4f4886a56b01b78846fcbe2561cadb7691 100644
GIT binary patch
literal 61038
[base85-encoded binary data omitted]
zls|eCO4aeGw%C~1*LIbo%(4W1Z-*!|BPKFlT}WJH8G$i_wQWVBJn*c)4w=PWCe~KO zaW(ZcQPA~9!rtA1Njy|V--dp06^$Qq4ZE%)t;|L=1GIB1zYS4~KEx2ZH;$l_uga0o zxk_l;6AIQj6ru{R0BW`LVY^iCqey^;u0Th zbm0Es6y#EwN$9QpLiv!(K={QTV!E3lp8MrE$e3MBoL4ZVguCJp%Qzjlir0d%MM4zu z_%rBI^hVP~Gthy69QbAbL+;d_v#?d!6%cuQi|}-A0bUoHj*{ps^iJU2Q5vescgc8*a1{Rs1o^$s(cV_MfhZZ>l#kEmzAWjbXJ>G|_)}DZ? z+CG8}qub!w^;?C9J*E(md8z2htD`t%o-^!x*2s~Xww$t(ZA4w2BJS918`!x(4QF3y zM$AA|BQNIwf+UGNGy)2D>j&M2a; zTN9wj`!^LlDvMuj^@j6TW>e?atOWFzSR!=AQCz%rzVO2F(_l7TN0|&<;l7v(VU*`A zsI%n^*jPJEc@=B~Vc`>COZRbMR*4(-e{BhGna_Yz--g0?^#f>%jXs`#MhUOH@Cf`4 z3dbq|%W$YvD&@RxIl?j&z~)+bXQvC6m?5mk+JsXM-+%!Ink`-*PGA@B*@(GFnHu@d zKxu~~(WewgNQnNz<-|9X&HjMqo)gpCY(uc)#}lN_@};Q#`Y$T+51Y1LWl1kIs3SYt zrAR_5hg|S#CC*8W#bs1HJa90B9{hX|RKGrs--SnE<&7WlIpe!zl*dl8Z=Dt?rL~im z`WT0vrcNQBR(yiRw~|3gI-o6-I!U*l8tz?1Yp8Gfh9>*mz%G2R-ksXGe;i~f&%o=%g~Vdahu@c!qF;xU zvFVK*pl~;n5*=2dp2)`{xabZ_Gn)`j`EnWDj*7;kU$y}EURBJ$X#=7j86ywhyWrc; zG?<}!86CRd&xHjtDC)I5Z1I|f-p{^=7VhweHPI)~bTGM|J>C+oDw)`MwV!Y#?KMy? z3PGu>E8C{@?11-DOd!8l1g?sB$mGo?Q0=J*yA9_Ezh&lOjy?yz*v^C8LN|CIMu_4z z11#SpgSXC*hWqV;@cW-`cs#nEs=qFU8nW(yKcouQi2M&-(Yb=n{M~SY{x_I^NRK-G z%r00BVI_>5Lq1(1Ou&=9ta0G8r z$wUDNy7*XVMN{CU-ePnvVj5m?JOT8EWP{P;G%DIy08FA(F~za~EiV#@_yTt%oS06q zRz@JPu$*d|z72q@KL|Y7fl6iP;-7gf;7`m4LgAYeaH)1gI}UN*`!-VU_oG3{-$bry>n5;t`y-)4YbTg&6Gw2@7omhT#o!Y^1x%}1 z1_r&`ICTl@DE+M4$o&3x6mvr!>@uUNg(1&~FjP)4|B2AX_0iP#nI_2d`x3Ne&vIhK z@)NaZSutvv{X{rtQ3}|ywjT{H&g05`s0NqwTexkHVhAvB(GY4pSYycl8%up;$B7P>!q?#8WC9aqp z1<5-AI=3p36%tmpwQ+Z~CxIEyH=*2{P^jy8gv+%zC&qCdwd|eNBtsGrvq#SnDSau#J4p>? zyDtwp8+~sTBxMrG{Q<E_a;7v8v^By&xFs&6X2|# zNWjrfG<~K$ygT@g%O4*AAB}0sq0x)jD(OWE7x`H9E*JcAv7#7J--s_Rl}O==gc{zz z27J$)hM%(66Q(;ZQ``Mz(W!$I;Bk3Z+pc+^sV&FvQx1z=g@^Wt(Yya9_v0uB{dJh& znlAiGnYNUoNC$atQpYXe6Nb@&r%6b`dKzxH(M&`$vcbEs44{-0hU^|{z|0qNaA@sP zuC(rB;%2W8(%Vu8EKB-;a)K+|eR&j+yEbB%S$b{jv$Wxt@J2%RZnxTQ;~7pp?8J#Og5t#HiSJW3o(SHt0v zYZ0xZjuQLE;o0$6oUp_O&$MeqM7am0@l`-{e>{l{(x!qpcL$+=T@BihRzX#sa|h?H zeF4sbaPI4VA2i;ZhdVw`71ql3pqRwX)ag5`Kw66uN-HYooIa}{j`Q?q)CP-NKy%c;&A)L6H2rD{{`|0_^80g< zS;u~$_0|uotl5h@^DRNW*B7cw{sj2$E`_N#MmVj}4sX3_fKi?ooV%=#TWK>3FL)g_ z+5AP7+OeaQs8ySdQ~p|_huRb5jDJpa_3nPGbkv3nU@4QDnNM)z^c1-D-Vp7v-3)GM zVUWw>m(q5jE9ju#mvG&yTX17=GW^f=5u@=?L<_1y=NTZ!Uo_6Jnhu&ngJAs9n8s|V&2m|h6PXX@tVv*UQF;w_fEL0z8A__dOQ`_tMxr&T2 zFz`?hEnno0u3o!H44jIk_UM+PH$Rq9;pe$%C99RXE|US?op;1WDNm^hzX%kyBZ*+b zM(X)!8+esE54Mr#(c$AyIHlfA)a(|TC=3LVqW#A7xn}|?sChlFAp(=JRCft{;ygSYa?)Z}mPKPpRi(3sS z(LtSTC{)8onmnMw`xE%9!#Fn8`VJon7eI9}mo$46PklJ|E!-U8=pEZT;~gRghGOqyq{FOL!9K-7G>U)+`g=CtA_pi)*NE zVnF!1cQ&_EC`RRa43Cis3Q~k^ATyl_WZz zbcGb#?;u_L6X?JN&h!S8PI9N%fF854;vIh#MDsiE&}%G?kt-{9k-caZuXkbzU1hnF z4m#q(%X3>S8YNooYN2`Ddi}Jty8w3w6=fxBYlKubbR=50MJ1 zl3`wG1HIwdC-P230aY(hp#3byh*cjxfhS}Qa&6hfJ@h^fY}}p*W~jD7)t7Oonp=a5 zmp_9Wp1Q*G7xEx+K7`P&uV~di*@66PYVeiU`7q$RDdm$PjRuoTaRfHDTS%k3jn;4-N;Kqq}S!a6{n* zm_H>4$UpRh6_yUb#dj0lcmEe0ndeOW>WhGSyN{!u>gC|VwN7G znc!Wj7fzE6M*1gjfd}#wj`Xbs))ASg;KmI!@Ax_Bwu>*^o!o)E&w3zWY%d&J5QE=b zw#Un~UZI)!7m0iK)6uRmCbqq3L42#uh8fHN91%id&iouaZ+#+YzPSqi1`A=y6*c^( zq5}SGNQR}-^2GA5d%&%m6X^YOJE$D8114Yo0Vjgvp=*pX?RC(L8xh%CLKWh0VU zld}c5J-Y;sPwzp~r5A|SWgH;)bQ_SYIi-;7?!?{IG)i#D!JpQ@Cpqa)!QuaUah{<* z7Vmlk_1@kBW}9hjYVC!0?7dIsC?2GZjx40_={I1+?nCE3?82MfZ^PC1C*bmhcS+WH zNS8nH!!0+yL5pN-vaE9r2>Yi*ACP^CWGZXhOxhB0{q1@xXUY`F-E|JhUWnr!sJVkw zRgVFepj9xQ(T=`vlf{;HD$vd(75&omg~9H1)L>XSmHTQl$c|cv(s#cG_GLPtdfygw z{j3Jc-fRFJpU1(pKmQ@)&}j7UY8v{ttQ`!_DTctqlW;6o#4CR4!}pQhl;3w#s=&+= zs5pKDbJW&jWuGeUct94SuC4*+&)DM!^(0bGe+#zPUL~@13GDoJ9_k1`Mkzmv19#P} z;6zIa8d@Z3{`7S>xHGJe)?R6&%mPAT4ZH-#o5rXs2drTkvlMOJ{g?P~whJ<5Cli*c 
z2e9FXY`9~Y8u!yX8c#h@h34y>K-P61z@=anWN>Oep?h{Ku`RP7I+?ve9S6=~XF>!L zyn>dIjb$yoFNFALptZxQHyIRN%z29!}fPD3v@eP5RR$pqE;57G_l}C0&1paKBtXDLi^@ z3s(JWOd9X0N3Dbro@r}FJF0hLCrXu^)8~NHn*8ts%}wO0vQgZV53qBIB{u9X#yJru zaIL=zz4W;OCWci>gFSgj`qxxS7?cCMUM&N3LcWyI2RP7xT_upf=j<=!_WtDRLDUS+~vF;p58l~5{o$S&4Wd#FKw7`UC5vm z>VvR)-Yej4-9$9{lu#>T8j;ccI1qbS5iX)1qKPx|0F*32Dh4w_vh-1yeL0`2xco6v zYVja0RSCJz(kft6U=mVItU(GL@6h^>7pT7%6|uRJ25SFz4bOYA83h;?z=G$u@YHXU zr@SQOVAm@HL_T*rA<3$ONTnOLXlde3y+Dwg*^Z`7n86uGt)ZrMEvkKy3sZ_71EpO` zgtuA@SbvH@VK0^wm3ejOSG*UL@75s7`7M+QLkV8I;t5vdxq_TR0$e-9hHG!-aWiJK zz-qA~d@<&PN{=C^h#G-&Nh}d}sS)+~<$|X^71(DMg@3=u2OJQM^g8bWrew0Efm;q< zd|(^i@F4*+4OhebwH!iq=RUmsd;}$s*-3dE+5vXn+>OI~uY!Q^+wg7de*DBK3DvG+ z(e776xW^wYA^RlrCjZk*EH$V`7ys_W&oVT~2=5Ir?RhlmKCuL!`^ml~D>fV$+73Z)K9F z+++w-w{j{eZVj+m{+NpC4g~6^4xHFk&0x{BN~(849ep+_13Gi2phbIyfOZ-NMMkNp z<8=wuv3(Y}dHN%AF`kYM2FHl8K^{6YF&6-4Ec!i(iFw3~A=#CusrO%c3HgI^;C%X5 z6x?(P&C*y;6W_X+>~3_c1r_Q!T`+HzD3&6xK79gF%lL!R!^Tu(u%*YzT}%vreX?hK{NDu=ptm zqWY;ltMWJvk%~w-s)c_XUIl+@PC@2_`FP8NP$K4f8gL5y!maN5F1!;m177soiZ|zK zV9#n5k@2!sM3oj>Caw?Hz%UZOHqgc30&YuG}S`|z2BMx@(nD&e%EPu{?-=Z+gSA7I$GAq&NuTI3- z4nJVOtc|j)y+oZ_6Uo}zSzdP^I&7J+=n)XUGv*taaSQ`x99PU9IR5ugz zZd!w#Iyux0kqKIoDFsKGC7|WtC}pUA9f+$8&|8IMVExt==2|NdT&2J0f}avh>3&FP zSC_&W@1LWCkuLbuyb^H5Q5SvsiqMv0CsAAdT+H6P07X5pfj1Yu#hOcG!8xn#`10B# z!1>o|)L3T+1Kgj&lF`dKfAr0;uhSK^L7@5=<=l$>mMZG4-FjDF|Zp2`~3luTU;Dcx*osTDMM@8 zCSZSi4f3bQXW`S|G@f^2A6oXti28D_8lAhd5FIN|gVye9ykOmM`svV3ta3MyzQ5ZU zUVC>LR_)1!-&K0(QNN?Oa>^>W&QgmkOZY9^5fca&?KXk(EF-A=xtYG2v4{+dAAwba z$*@a98sY=Wu+DfT^gdgR;O;WoihBZXe(w$fp9W!((HSfnZXW< zAx&J8QA0K+Q(#H)JY4*F0s18(KxStokk>cBPtBvSzmpVfIyXcpyiEnh+D%A(&RN_O zv6!+vD+6c7l%tl8X3ASG1Ft;R09G#5g62t=(4D9n;)Ah0F#UcU6l_t0FH1{W#cOBd zS10S>%$$De$CAa6vXVwull8#md!Nzk2W{Bq;xuUggJ#WJAPeRWYvI+Z%R#2;4ESh$ z5>~0!1UB!}PQ-Y5{Q2q_moAdSpS!*iMdvMruUzu6XY+jAQ~3ZXy@~?0Tlb&< zLJlwMz+85M5C(SG;ccOjc- zdcB+u?bO!LE8L7oi(PD?ihU05Hakq6^_GG?7fz!g<|gj7Ma$8@z2V@(&QN&ZMG<=6 zFzIWs1(13WN`=}6!*umZ>aFM!5!>qLMVs^Q1XeR;IX63b4r2!w!JuKrUjCVr>MeS7c>$29OOD(5f*Fa5`Dq}q|AsF>X+0| zPv_l2ykax7_Tec?x^)&(mHvl_nMvFoUhlXYorjU%{K3|rE28o4YJF5wtO2g3l>vuk zVW>qf92VUOgO@JKqfFlrkW*HGY8B<-xqrK0!zMp?A=(apTg}6EikF1Wh3nwd5L2)+ z$eLK;D2MV43?bj)5wcJC5AH6N#?|s&$fLf7EKJ-6PhKmdAE+{*)AKScXmut#8UxAF z>I}M@Z`)_^*}{NU_f(^6jc4KC%?@PoqKCBq7C(+{nK7aKPeP%ehe4yr8l8@Oz|oq2 z98EhqKon{_6Uo0!k(JVAVs+*wRQ3G_GGWGp1bP;Abp*CU(bTUKN`V*YwZaOqhPRE^Ch)Ay#+wKhv0pJEir!jE}Hhq9u}CZazhtR z=0Etl$ft#-j?m|kF4L5{vAzX;T-QqN{G5f6^$Y4h$Mx`$Oe6+U1IX*y5*&2qeoL(E zX~J~x26!Jv!K!OwWc=A2EEt%KHEl0~XKlZM?Uy9fcO1YB@fW}!cW65o-$g{Zw{mf( zED^Tl9NHvj4aKqroDeN#49G=q8q4IUnD|TJZ2EW0t3RIFUgSY4HY|ytPDYLZ9&HeFcA<#P6iF6+s5TB>l zBL0+Aiqxyab*g5d;8_MFhWzoRTu-bBqVWf_WOQl{35-_`g1xz$;nqFM*h4Rn=qF1l zBSthZmVF3RpRL0GG_C=ekQ7*AtWG^wi$w;k2=wIKe_Ts@89Z%IILz8-3jFAF5Q_psYzeqmEVq+&>|S!a!OL(DLJju2`qEr#6+iP(5V zhz@QMuo@IH{o@+K5|^=n1UNS#bEj9bFLxB3l{H_M{(V&kse)wR?e=Vg2>-!uwg4z zdvybTFhw7Q-L`~%j>oXW)oS$HoWMVPGq?vjwsQ|u+Q2z?cEE~>Ce*K*Lfom}hb&Es zfl>PvptE=%N=T7~5>Gd<fsW(s{^Xy#`UY!Vw8{dO`PhBaZQjnW)G<27nkQ z;py2!RLkEYB=ZtL*svP4Nt^|I`!hhkcQ0jGZ3&66O<+og8d{_6jp{vwM3g|9Ug*f@wrbfV}vjtt&g_FTHFYc}uV;7$6_UmcOGmIbfqc@LQ* zw}JNNg}uGFuEu=mA0x4={AT)zy!O2Y_W4g2N!teTZoe_4-fH_HN1bXS?Ug@rf9}L>iC5y*-;e{YRmZ4c z9RZNru1y3tsGykdm%t9Wk0A8|2fg{?PJEIvr7r!hMw`#{P%C$D26u`MqR}m?)ZC7-}gWP zOCCntw*ZU8(|`kM0vAb#15aZSG|mYJSO2TRQoTx~jNuE=$vFjYZVW~`hxZYCS}x%6 z%Tn~em|d{(zCSuNr5#*&?T-Wh&ckl8ogmTl8crF~N5d3EkN&%l+{%_huC@s^FO5gM z-Rgy<_Wxl-^~>X5n$~#vHH3n<4N=wG!qKg&)wt~TW$y0ly09f=G5)8r7qLCgCFnX=VuU5X`J*MU6(sRQ;*jfXaa_wmZG zcDC!w4A>jt1h{MbiJI#woSbcwF~{B!V6{F%RJXE?R53h6j_8%pyZX1%k<fO^4uQJ7dh3Y(fewh=`poTJmyEz 
z-l5vOjUC^~-dq>nSgNvU?dS-;_q?36bMGSQ-#@|SfI(Wrq?IhpKFwW6n&WYyB3k`8 z4#c5Qkpd50Sri6RjGUl?vJ-L0s|jhI zP=lqyg+zVYaVQ&5hTf>ZM%_O9&=;ct>Iu<|R9>t?V;}>wU#>&478{{LJi;HRdLUX9 zhBfk-;PX!=Wv`t8rE;2J(gG#C)Jzj>uQ`jJNK7E{rVkYEt4D_xAA^g?W^TRD2!XtE zLC+B-T>R}HS2fU&y38E}1AW&yc5;(goRK-)b2kyZtG+;NxAO;)G7eDpTu9rCT?vRc zYzgBmcM?T&DCjuq0&GVEP!D?$Wer{jUDq42{kt}7ndJ>iPvoFr?oXg;wG>a;myLBc zoW*+o(y{IKQ*eB?7x&y)Htx`$gQT;&F zod=isl)~$U%O~TxBKoCPF5Y^?5_&94hTW$%aL4?UQ0h|*(Ehg*c!g}C_4)dwmh(xd z_Ma8Z`(8w;+NYqfdQIH5GJXW_G$G47OK?*9UgF`lU>Kt*Pd-n( zDhxAgCtU5!LE_(P)E;CG8oW;nZTr80)eh5;#C-u8Z3qR@m95mna%T`3@(}5i@OsCKVCPT~h4#jPCD+cP;%tPPIQgJx znGSAH$w0lES5l!9lQu@x9z2~YOKlkDfq|9*r157C_phcrC?(U-u!jxxJNhGKX7`Zd zO4pzn?pj2VWG^CY^U=@&IZ!xmU=kgWBZdKJ3}){nDb_FZkne{~#u zt=@>f`&|W3`V(PS=l>YG5=SW7I4t*7A&H78M|Ni3_nldGXO;*_hoqEFDoMH&T@q5{ zELY{uog+C)c4wCmNl8MKq>@fP-KFU8`4?v1>$!f9CUa&2=bl^_%C7wLURXB=XC4vJ z^rA76|NiJi-((kH))Z8-3)Gb@w7Zu}-Yb3-^Yo-Gwj0k7s6{JCaGAgEWOAqOvskF)+I|mpo8Mn>T3eM%vsgdpV#pB+UC=#b8m?p4y~q} z*GY>i!f|0qWitC^-C6$jx?QYA;Tnj zC(`Qu#cWaS7JB9Ll~wVAU;N-b`pgln$#lcvV0z@tUD|5!rqG3dh1T#Z=l6b><9F*` zWQQGbCUx0!(RI`!J~Ho(eEwy6hSFNTu-PavU)MF=ts=~9M3&biu7UC&3AK1HRR?%)wXW8{@ zXNt8oz4<>5u*}vBcd>@aDY0+)HnGB)NsJ#ajPc6iGfMt5#kKMmn0k&6^ZnQ&bJc}3Y z6)h4!|8Y$yQ}bzp=Q&H?ak66%RO5_GsR>gfb5fL8KaGC$cOAR<%maE$=S*hNZc~P@ zYQh*a&JjO2lOmix@r`;6*6=?%zviWxbPCNTJeIa%U9oPWt-wtjAy!x2EbiD6)~ftEiB49BXG3-bxBP(BC5pl?s)^Rdti^}`VeOAq754_0XyRG-)lk+&Nhx-A(`jK2wY3wmpvvCXE z5#+|(5UnKqX`ez@F4w0wZ=S=yjB{w?=LI60#xDK_4=sk>7*7i|;`siTA)W63TlAqd zla{&snhyS}&F9rzVb@trXI4$>6J2u17E|Y6(lymQW-wSoe17$Fe#2Tny56Ojt_YW6 z`imNQi8hAffWS^h(@MZb%ywi7FWR&ASGLgar%o1c|IsJ*ufv$1PWOc88Z~L=iS$JdKP54Pi8R%tkDdzVMU84P z7es07y%YuUk+G8M;RacY&#y8CX2rRZq>zbs{gT^)(@FLcaoqunxy2hK`(pn{ss@V$ zSzQH!w45mxLDX)^mfd?K|K$f;6wL{#whPaaytRp!G;bG3j9z9-c9@%4ynm}{VP6tr z(KeAEKR)tNY)`%uY$$v$Ftan_GnQJ|M=D4yuie8UP$*&`pUKs{D42cB3X$py-vYO1U}v6 z?>nQ&MyRUNB$nkSChTccbxpgQCu7uf=9=FX&aZE18R0mxOEB zd2~~)2J`G_9&Pj~j|shPN?(6>P7F7z2~-c6(m&O&GL0f0`$R`&BD!@(Y`;lf68OfG z85v$Cx&+GUtOpy#dq^dg=K8c{Ev-a|B-LgTFxFEUm+foUcndLIL>@KaY}q>p`~~%u}K`ByMoRd z-Or3K-^jd;(i8I(o-mCO70goN1g|XVF&*=Cs@TZfpNX5?${bHu6DX~?%B=O&kQ~%@ z6uP5tj9mFG`po0g{EaqO`I4LFjQ{9$al>J0!SwRQV#Awu;zkQQ3vVA?0rObDT6#ki zbHrF)f<3$;7#Qpl1oxX%*Zql=-0cSwJwVa|vt&$=Po9%_L);>F#WtpBgd@!+7WFB8uY?dM+NpW#e+!K*K{6hGpNei&O0djy!#pt-=9LyzCVXF6`jfPM^YbAALgy)SB=om277x1s@k)ot8^~Jy1*Mshs6~i|-Q2 z9^Oxne7(SaTmDP%XbmN~r7kN`rg{Vd*-j?>U%lk5>Rra6C00VJt`(HqlnS)|%ayo= zLka)%xFkcmkogdBqOJP}Us**TS6ncTzkx)ClMM^W^x^&$N2a1xWM>Bhg?3*fARK zcva>k{o>FP_S&$nNd31WoAvF2sGgg}ZgZ0G0&ySK zzSoWYiLIn7f9i^FEj`CBK8p!QRlo6(^?cwXKNS;+bZM35 zI<{)nTXwTa5P#i~1EQDmwxU_1hv~OZF3>0I4zqSoJNfatox-w|WWL}C*eRLmsSk#=c%4(;w=;+_Vk_)^lS13FZW{`>osI2ni090eHl7Ncjr&z zt^FaSuiE8`dd_f|8Hs)Tk4fLyD8C6$(uqmT#)uMH%Q=m2_EnnIcTg4=6>MkYziZO- zO6Azo3bW{_SXTIKP)A&J$5@1$2Z%CLzp?wyE6~SpycF5H8;JQ~R-$tqO3ab10&#}! 
zN#3zyW9DMmZT|U=DfE*k-s~c+DSXH5S3;S)dOYm0v$%Mt9PQpbnXxkSVrPuJWt|p@ z*-h@ctnCydTJF|ddi#hc{W!vxO?4RKJ8$eVf9$r2Klj9MS}-ucz7AT*lMS?I6z#i( zlXNGsuS+}l>yj_?+Sw*{lJ6Ae?`jiT`?{t@@Z*h=1}zy$W%^#hx2=yD{&0fiNn;c9 z*!cPc|3e7mpTr1?{#z?qOUX!-9`;JS*0eG8qtb$bNu82RdnpT#jxlCIm9)Ug`5jY{ zV<}kt=Z>VyCQ+gw+aWo#u~d+}=``b!=qs^vUm`JE$cSTC+6w;hFH25ZNAO#0beTOq zl<0yU%}x=oq=QXcultv@2^IEciyF1-*t+eF{Oi|qX@R$jc<6@*8wX?9S1P1v%QIhL z!IpLWxIb6uYI~ z@>Jo6i95ba2KY5zjk{9Hiy{aqsRC@G_5J#czeCBqMG?_=d&1v4cx zgJ|&=M*$mlKs@vI#3WJWdWLsnF@5?AlC1k~z-XTV;$uFe%zq1hFeS|q;?1?K!iJ9X zl97>7+RZYW;Ur2+%5E$WWEx2^2jVOlNkA53)c=T?q9-G1tW6PbRPPYOmQM@`8fdH3 zN8;Dlc8Krj%ZO*!-JxgwwvzaS81duOYUnyvnYDX-O*CtD8NYhpT;atR6Ikh-hVbSR zeYPwpPjt)lB>gi`L9E$b%3gkTO0@7{i#heHU9@JvlP z>B8}gbmr|l{Qu|!^y3`B^I1HHKWoH|9xx4LYp*8oE)+BhJ%?}6ryqFWvL(65sl^4< zwdI4{W*tJbPlouOa~P`IB@mMPHYj+HJCSf*6KPlvqTZ6*;9}!!v}IrgrV!YGJvpg> zj;1xCJtt$~y~IrXwqyf%W56P9yjhb3&A$Jo>X&+>PRnc5vV;nF z_&_{zHy$MsPEz&qnUo)KinOs&;AXFNL)%B%QPAP@z!^ zgEwb$UvzjjLwn&f*tU@0i9Pq`@8Kc*6AIGElxro*3%U1Ig$5OmpPrph*q~jssU>D8vQp`}~VI zR%1i>^olqG=5L9ml42~L4Z~8a_rYi3i{RVA7);acBC$_*95;I{0{u_5;k!dKF#XVL zAn~;#3`xC&fB6suMJb2Cg!ve@M|Q)h!X!MGBZXxP6+m6`OAxwVfKP26#(j25qZ#ww zf%>&?@Ll#<#9Pb{^mN@Nx(}+M-@ArEd%}F;y74qTXi_y$O#2Ls#zVklZ!^ffS_zw7 zB7ywckHj%Bik9Y{z&1%KpiJLG&@qj{?oE~utTELnMjWagP{ks;o>v*iA;$LF!DGH{h#Y`3iZq^dU4aX8*N+S`m9XOTiabBnu!QzZx@Kbph`slxz+lr}Cxi`vC zn^ZpeJYNIp6}Y22APKEksiZ89Penhrs*o3K`_Wy+K=?1Ag^VPUSzHMaKiCWs{e|fM%9})gcqytn@eeK8PymVb zX{g!!5|-R|374k2z@xn_2(9!$-}ZW8hO?vb9NGD>E;A9|d5MP`xaJOiF}wI|5K>NqpQ8bym$&_+ozs zvFlk9m@`EJ+bR?>r|2Wdb}k2HZ2AsE_m!f%xDZU)I~5t1zlPDLL*SCqa-^sej8uAi z@vY1D0G&BBvdoSFBrhht_7OX z7tnpz7i6-oHM%mf|G(eZ$2~Y%nmpQ}!PQx_4Md%YM+c17P;=&bQq0rET)!_;RCj?Q zcj!qzI{N7~x;rC`#HZ4N_7<+=4K%hud$qu-kRkQe+IK*=?Nw1#GyIKP32(wf=xhWWg}dD z>nm6lG9TWXtOmUT8bNIPTr_>6jUa36I)32M7cg5z2AuVif*Q77K&oLD_%wVPmq}QN zzN9&ze<6p!vDa?I+q_gT`gcDhWiBD^`@MKg*bT_$$RIjilQ1l>Lpu-Zz-X!u>XrQk zg!W{De*XxIcM}7L7f~>Cr4XCvy&iT2iqL?@Iq;l)2bIkbcxf~T#DwjG#mD87APCMJU|tb{5s1P=mWg*TAQb2}G>UFv=*d=S*9b1iiW{fZ&T6 zR{1>*>HXG4??SEM);pU3u9^tY)K2(T_c+I4Oqx7m%}4$%cIfNX6KIk9V%U@(V9rj)PDRlVy#J*u)L9g6ZudirE4)W3+-+S<%D*(^_v z94(=iozW%TobHgR!Nb&t9XrU#$;D)-poM$+YCiY-N}9T|^EnxH(U?p+`iMMfD#w-L zy`moMZQ$nBsBwG0{6>PlXv(fMkg}Mfg?JvTsqhO%l;iIR%;`S`RJLO>F%({=P~WUGj!YHW;9>SOEQsWx=nZEx<&54H#02LdTZR#Rq3a!snLP;Xmta zj!f}w;FNHcGdjndSlrSK3Nznx?0+VL9^3QS<`|bddFa73^w{fh+kwWPv4xhQd6-`Tj0+ zWY;3tyj&I;naqMq52T^vr)^-3+zd2j{UsDB--3L>Bop5Bkw%a6l>q+o51KrF0c9Ry z@X-?pzTv###9mxKarSN@F8;`!?=-uOIQIP(Hhc0V&^XB!PczK`vR~9;$@Fz##=p}* z*0BcvbJ7yiT9u1E)_(*t&!&Tl4eL0)_wR8&eBxc9-e_yUM?dBe`eq#{l}ocz!AN_C4%Ahr;V`& zTd?JFS10Zj4XB(Z$Jtrs1CI#pU>3aeVnY_FTPTH4`7`+B zo+l@?GJ?P=)a4L$Ypez00SiJfIep!Q^X&_WVf#^hs;dQ1T!M)(O@GKsmA`IFwaDNTR-h{L8ms#S}fHXTAyj)SrfyObJDw@H5zjqcX_8 zqYjUfdWvlw)FLcDYoJJD0$frn=PtRmj`C;>B@Hv@Q)$<^I$$oWPrH!a-+ zc^~FbVf(gH(nY%|wUL$NzcW50Z}<-SX6DSDH`AV*x4wZ~BuXMP!b9ME{xs5i8IQZQ z<_SeDiQsB<%5pzyZbhlF)2WK)IBGIF4?c8VqQ;dCDeo)(c%Ty(z4#YKxUQLxf0An= zjyQXBvPV^j^`pxP&NTr#@jxE7UQdN*jm)4&zX>RI9)p3NtH6`T3xM<6$wc>sYdAJ& z1;d!980T>z@q6`M+=*6(3oI9*CAVh7mOujKYc~?Ves_UhYY~)~uEpk2IH9XQKotHx zkIh^lg16>rg9z*-kXW1|mbP}{;r<$Mi+w1*T(Sw&KI9Rb&AtE)Z7(#SmrM*n7Q8sr zhO0%G5v%*G;r0urC}X`f1T(If4o@cujcHfFMo$|;Wf0)fxa4$XojZa332%`*CgR*7mtr zx_gI&=C~IGN@KZzcv#7{Hfs3cNWfP}ch# zUT`iJGq#(_(SNrVY8G4qf$r7#L4R$`w!xFw7Zd@u{&s_gYgd7#lWmB*8`csF*G&Ua zchlfe<*A%A(W8WvWET;4Zw~QS90k@keg#E4WRdD{ESeYa0MGZ=2J71sz_g($D86gU^UFRCUk>^?@iNXOyXX~k&MY%g?(8;0?9BS@R@zkMlL4@5u)!0{uf%$DhF(yJVd?5hH+~j%W~)FHB&t6sb(sT zQPe7vBh-A+elvNMKr=u8F3K_MJf&*Nr#i|txuv}_W|9L%+`{5LW{NSB%sw}jldCZw zZt!gMD;y;!XXWTY)PEqoF9bZW?LcLCA-T{z 
z94&adlsFXe5qznPK%5ijz_Xtr&_sCw`A%XD5%Ue?p56n;9+;A*`^q?~TbyBOe+YQN z*T$bIiqWAlcl3Q{9BdT(6K18KfL{e4k(CU-PvtLq(RL1DMfRxFY7*)8cK}RXbqXc& z`RH5dJ9x%ipWLnU8a)ct!TWa?fIZhLVDPpr$cI;i9xWD;tw-*lo~ON}M$vgrUM7!R z7WfSG5EE_}%kjt?xqdLCN#8JMjfDW^6uQ>pnQ-eg0B6Zwfg1k_U7GbmpnpDfBZrP!#$K)`P!`+Jg6e4Gic^!*-^jzh${{hiRIh=8Kr z6IgEEAsEuq3v9>;^y|)2xEKU@#S2Q>DhsQ zC~O9CHZ($ND$sS=TaJxtImoe50%f%n;T4;Y*1K(letS!Z<_b%6C22ZLzj~Xf9(E(| zB(~s&=GACP=Rd?0PsdJ}E+)gz8o;;CUw|qsg1w*miLD3Iaof^N+?y9miY6t&e;tm)c@zw>cK(t^cco!wJT!*MNCy)qTqa3_)1#nl@5&6;Gy>Uc_h`|DFekRY!(q61+WY20D@zjw*+z;bG4O$YJqHpu}_% z3vnJEl9`7745q*{ua*X!3*@0`1`g6wfOzz zBrZM!nO8S(V>4ImWzpC0%n{1Y+F&Y$e~kqESZ$dM9@ zU^1os2Gmggjgsrm6Gb1B@Bsq@vP8#|EC`TA>to}<_7%(Ea%V%dFBL~Sg3Mu~<8So+ zUl^Qn;4!w^yBZvw=Lp8syz!6ctl`655y+lzi4S%96K|iqgXt6NcpL5mDr8oJ44(x^ zSMr0i;g%oR=-vgVEk6w2$2yT<_X7CvQ5){Czydy9^%Y!kolV+!IFRxcs_<+;B@kRa z1O*aZ3#wo->x=fVq>p;U^rF%TU(OIGX2QU~Vek#kaYsM}5LX#KWZ)UTn_NS?b5 zy_r^zrsm5~8r|~R ztY{!k)y^VugR_K(;TaUGe+&kcpGEe~%IJ=-I%w1T4@f_aMn?+TL7xK`Ji3+-ia$(X zD!)K%NGlHweLa9Ve~cq88#JMvtqq`|=kNrd@E4_AdVp&sR3RU8O)|aB(e#?hYh)zq zAZA=S3|`Ygupu@d->&(RIA@WL89wKe6{iw_XN3s0UU~wzyX-=H3=g4WYu|#QIVE7+ zvKNrjPoZLM64W+~B4Rq#O{J)NM8#8Sv@t`Fv7 z8hezHtztSV+5QFM_J6r2vd>d0QAg9Fdu0gkOM2!6XEFSNwDJYLaY*A zo#1F@fWOD<;NmWOqu1%p(0C{TWqz*$PRc3J*km119oRus>#G3M`y0`*`M=@r{y>xP zb_&n^g26`iI+ipRy8 zg9dHja5Dy$1!$s>2gk9E4hMm%t36Pkk_8*92Z@lZZ@}uADbOk%h7QRLwtDLZ+N%Y_s8->V&U6>*@CM{S)_MJJS>xvAj)tC zv1^Ymvg}~dme?rB{4jtwa&tj+u`|k2fCbM9KzX+hfQ%{HrWrp&VcYh5uzW!yn)*5wee%|XTX+8kw^jyoe;Ij_i+>bT ztdBL!Z2LpLzId73p16@Jaa_y&^*n)^zXMUt!V%w|9BjF9^q=X3Yr z6`XNVhxGKk!S&cQPL=&FqspgCaq)yyXan;}Y?l!=GxR6n&pf5#-!I@=L_EM`W=6t& z`Q?~%xD;wQ)d3vIMaEQ2G58;?8!J;K6PW^zc(C7W1_LMi%Lkj>_kt zkGD2l@Y(_nG;ASfkB4v^V%|cmpGO3QD&T&rKcRlj{ite93aq^v4encwf=opt)YqYd zR~DO)*o$}+_b3s)>UxPjG~~jH?2G7M?{XyjpAD2GV71e5;|-SZ0~ZJONfX?)sgzm3qZ+C` zKb+h}#Zh;)($S5!wWMdxbn^J=MDBpaO$1i7AQ<$WRB*CFmbpFXWt3LwoULFYiTL3oyn}W(O+(LSRjc~t5BH?*? 
z5Ja_eV8os|q)>ewK33caCAF%s_Toyya@I?9#mWlGddQ#_^Q~~~m;<3#u^vQ5zXyH4 zWs#oTiurabO6ct%PHJB`MO-V}f#f(6;>bQNP&p+728>veCO#iwjQeE#o=Y?F&uJ3c z<9rTmnkS7;&AW%fr&|J%<{)_aZZ8qlH8KADf;<)`xd*!pBzbd3xqFqqBAtDuRPW_Eq;ujE z(x+NTnL3>#cYiA)p9g&7($`zK#jCBUuTC;l*!Jb*z2h>}>73Wpx@~W$XCBYF*#(B& zp1YS&QG6_lF^P(FkWXxD(2M1Aw!m zBepp?#%2BMKrW)-p(Ob ze@uaO7tdqnJHv_W6=AS}I}4nBG>!1Oq6VV9>mc)dfa5ZG2tV@Z0wj0;#Ev!9BgY@g zpy9U~FbtXl-@D|49Y?Pqy$4s&HtPpOYJW2Lcx4job9oGXdh$^7>$s}Feom~r)#UZ=^%MFp#g#H#j8445NP`_TNxhM# zR^4;wo=C{2sxK!{LWP&qqW72JmIftC|Kc06!r(WYHRwpyemur~IbeaU+C&kr*X~w}aAU$EyixlE zgfpE`$}$Cx$MR%!|L`2(ad#u(zn+1+?Uv#P#;qU>wj#=^Wyo{Bal|{#GpK264Z1Kb z9gcnUMbiIi&aW?+Lb#lcK{v0k6DFM#zL;J5Sgl_3wpH z;u2xH_{dtqDSsE9HoXZm9N|JO2MP(@)^aXizE9*YpAWa@E+r1TNr88#4iN@hi(%vR za?YX3pTzsqQm}Yw6j+v%iipRF@GAm?!OCk-O}gQjZLDY zP8pMtdu6G_LMJk1ogt~dDF!`Okl`jq*u$*&N^(!(Q*xm95}8zELK3v63Lzbjx{c*-abWNc!sV9_JyqXfnXrP$>LehhCiOhd_ns~8d zGNo90hRiRn;p9Ct0C{fjiLgIAuxIyWiO$R_Q~MYed{8-xjnQw2mDVO;O+qc9TmBa$ zMZE%-Cvbn@oGQ-8OAm-WiWMLyMhu4!nFI1)HeL+Q5K8sNKtvB2 zc2WqI)3FQ~2h7D+CSJh)Z97LOt?`FB+Mn@RW!WHrxh{snsXZ%r~Xr+hI*i?dBNTc{BjOm=uM^f0rYsnZvoMsYH&&p2J$k zFhCvmg5RI3;NSFcnEdNG9?j*G^cF)Px8e)(gBQ{IcV|(>x_UG;^*z`lzXZ%)69wh# z649o0!|-!eA|BPQOw@Y`LFMdFsNxcdCePjqw`KLCmaCtzou>DJt8pp7RB|wk?14){ z%8^U=5@hMFPkE`{M`Th05vgiMDc(1R`{P~U_y-60CNd8N*hr(I3H=_0pCdD8(PYy5 zMza0-DJZ2;1{tYHGSamidAH7iMI}7AVtWxvSgcBRP3|V2huD#Y{<382lR1ct&IiJ{ zRY>#87RU>H4C*S)$^K7C=#RSr=KM*4EGxDLd-nxGm9_tfvU@G2^L0N0zgHA^9b}HS z_|qJxL;I2K(kDneelzUvMd*+H5az0@0+P?a0rmQ_WOlYTx?=f*Q=CiS8PsOjH|;qF zwr4{3)?(-|_+f%$9VZUUWnodmM3h!`*EG3X2WT$Y3TxITVt&7)P^@1k5!!ncxQ$MR z{hMbKPbXEuQmON}Z^JpX*jx!5wx>{saoB%g} zUx$5|^_93f>j79>fpE83X@twc2*UeY2Oxc3V|BBBgACUp(CFez*v=%;l*6e&-}D{P zHYo}W6)zxOyILW#bTTplDa6^f5wJFK35aQ317AMvLuE(aK-KtKxV+g9d86iDxZO^WF|VjrvS~1jWN5jb_REC#YyUt$vU%J`zom3JY?3tSA()+mCcrf z@8X*I%9*Lj7@5siImVS@BF*514C>C&ddi;JWR|y~-R$C`e9Etlt%nlW4;^GC7VC?b+;?dnW zu=(gHwz5+LyUQIX^xZpfjmkw}+D`+#zi5E)_1FyQH#t!I8wFM@iN{wy)4+qqb>J=g z5aMEFEC~8?84OxI$HKYi2!$_3XzTtk;(b*YsF6#DS;;DR%M)w3n&~CFX2)SW?N#8x zJ1?Qjw{7@0g_ERGeKbsZ{SXF4+5k#c0c>oUjdtHE1gg|Ja`ve*5Ioy}{IRj0B@6aImP6+pEw2%S(1!{oG- z(8!Z5Xj54h2<0(k@~tbx*nD?rUVa-&Y_gzi7?0#V6p$J1M?hg?D)@V|5jMW_M_bY* zC?Lq4Jg3}@q=SRWUoB5e6HgAJ*{K^rv@iy@UGT^BweQ4PH!BBrC>EM2J22ewxU19= zCk5^r*$Y(bh7>CMLIHW=LFom(HWmRl572-m-i z=C0{S+#P>d>~4(?TAJ8JXde6k!n?D3N5Ef_0GhjW zpjEdtavD}AA|-slJy!y%L%TrY^$S3e^~tx0!+#O7iIVp9MIv^eZ~vVhBeMr^Aeq3ZQ>1hq!;z8uFhU2M_9{ z$TJyX;Nz=AG`w#Wc|5lm%yKC~Rv#9CD_4)>mHS&z^j1sq)2pvAMDYtzcWJ^$WNLs! 
zG7&Is#0GAP?L(*Z8SJoTAyKqe3g#-jhd-jLvBS4zi1{5UaBM>{CJHYFb4LIg9d8D> z`3hp6Y9`l+t^X6)EME+dE;x$jJ{v*p5>v7+@huY8YLVHWFL8ouexQSwmB5h|nfOi1NMiGk zv#6IyfN_DIT)p>p)ZgYI@5h-~Hmj zirW{Fe8yxlZk;NDYiuAxUOXV{Rc>G=#?8dZPzWx$QPAKx0G-K>*oJ#0FdrKvHnr!2 zw!kdl@o*(^XT1yfd{79r^p)VsscnSOvGqhPTn8V@mqT|Z68{&!0LTT}fuM*Jgm{Yv zLYD8L>(fyDGB=B;@cBZ#K5PgZ*4}^{p*!B9?gx{q*CM4+V>~u66Wz|4@Q@h`z^0qS zV3VON;gMMhc^)J|zbxz&m&MyC;~?HUD+Yn1R6PA2Hr zn+|Zwkiw5OJq0ce1S;583-3jBayPW@rSxo5$x6qY+^{Yw%7nhc&3_wC+PXS&ogaRp zZWkWlzR5`Bn(ewu&Gl9_JLfh;mLvlz>SP7i^MH+6S7A4K#m$2H^;L#aPHdpkLwIIG zRyo}FoJZWD?z`MsReI!R-^pCR|7*Jruqcvl3rHA7;t&OtAQGm#tE#&P7>1yzAR+=v zHjM)ef+Ph*L03T}tVl3|n81LTT~sod<07JB&LXD8ydo;%>w#r|)2eU(_xL_&s;lm; zIk!)pdwaTj=Gi-{?RF7*9%n_Lv{2w>A56l>#d4rGnMov6rGWapDR|bblia*Gb*Sxq z6Aw064)|$H@psB=frrK+`1=fH!vz5o4*s>^q&AP?(Kt9gNax>!4j)T~VLs~GR&mhz=XcaN?Y$Q4L z?SWF`S)%BT}cPi z)~-j~^fP45mOOOKsR3V|c@J>UrJ%KGX~gKNhcMH6D7kIuX)pv0fELv@Ft>IVnH0B* zJNs!R956r^Tum0>i*$|2=6gP<(_j^}AD;u}20aB{wTfs&c_}Wt$cxlIlZ`5>kD^h# z3ea<2Gd$C94rLwjLX$*t$TLosTvJkpvd&%vcZzdC=b&|PUm!plhZZ2shE}qqI1f#H z#iC}J`&%}@vn3~0=|Wq@Hq3ed7Ln$d|)X$P@S7 z(T#YVJV%_M2VE_Mn{G`bho{^}3JwP3k$Wm+@JT;%_px;d=B}VSwe{%N{C6aZOCj0u z9w_IC6}f>GLk`@kOj$fnr|(=JLMb2o07d-wBR6tn+AxPdwruqoT4e9gn$&`@! z?msR3w@5`q*pN`>h5@E?L<-zoqYM=yqVXrICNtY*^f;43FJd2i4VwkmpQ> z;TM)4f(o;hpmS^+ciFH!csKJI@!^#^ypU}{c-`HDd#!CCqS9gr7|;l-vuubMBX9Ud zKqGU%VyrQ9BcxK-64ApEC&7x?9G)i;gdsz?HM&_GTIkUN}UPD^AACH z`J;G2at;3Og%4p+>H^b;^syYgDF`2Kc>!-XK8o$nXeRo3KzQKyF2ZG=K6kZoIpLFP z4Vh2=vE;jNCw6uXhD+Nt34cv7YCA?@eGSWrHwWdgGLOs4czl|7q zFc}=;%z?WN)HQ-dD4QZvFgP+{8r>Eeb_l%<~~t+qXaysQvG zE*sKFotQ1AcW!@z`fki52QM=rjUNx7+MnmrhR6EQCWcz{OGk`O(J(|IeMA(NGM;*@ zTnZyM)l;&e3u*ml99%vml{gqXhFEBw4x&!Ff%v)%%czOkplP2raY_(Nblx%t8*8Kq zx3g+Qj_)?`de1VDqppE@=l12A2D}GYMPE>OvILjzFaWtZdW6onUkLe2Z(-mXIoP~S zmuOG{K(?)hsCvPI&H?$*wzLx)KY`0!Z+!(v2v-towyKc7u#2H$%P=&i@jW~?eiw1+ z)=p5`ZVV0QUxf4DFT(g@Eu@;AOj0|Xz}1-PNDveNcDAnq;p&&*xcn?~z$t+K&{%{U z4wM2T+ntfONjzA!@ILH_(jZwD0dPA$7)}o;0;XUwsS)9Y70qx!2hX^Ia_(--;AIp! zb;KC7G_?j^*gX(V*S-kW!$&Z)gNw7L?;w@67NUr94f1`ZIrCnfC#p?pN3m{BXmf}$ zI=!Lmvq4=$ryrRQMXo^)Y}Io~`7>=VQ@?Lm0Jvhz+h#tW5HD>3{>; zwcLwml+d$N(y(>oF&O#5)QVqwk{;Z|rk}l6qjM)7p?oa->2{|;Ds)5%uc>1m)#kp7 zwl3&T-%{#N`@gEh5HL+?>$AMB`fCk{QKkm8+Vt}7V_ zRPYL8A@6#ho#b_`W%Qi2%V<8Q5=D#F(+}^J(lU$2;rR#hNUX{PgEN&;Ez1|m^HuS3 zr+q}-pKO@rz#|9G1+aPdEp%+TK6ycLGvJxB$ODG%M0(8uqHD^2LU3{liW!-Q?!R4* z&(c{B9`5AB3R)dLx}-_U6wg69m?7G^nFCX`i$I3*Xi^8ii)o`&sBmxrG4!`gt}w|G zU2JpUUR*1SKWh#F8cKOs^Q>_Y?0sd?Hu?n=+-}8W4jzZMIHu^gOJ1-az7#Iqp+yKh zw&SfjaWLGw5sp!L3(gg&;Whr*1br%)5RHEfaw5|~;k68KABx)@x_q4LJsTyP@mXTdKOdg8i3#K+DYuqT7Xer>+xK|8kc=q0^M(V7>VeLMNye3wU~#Yb&<~Txr-q6u+s!bD#>i_~&Rm*EE6Hx8fRU z<@uv#7IJ9UwYoHlAxDu(kj z-0?v|U)-my0Nv$;NyhVtV_g#&_p7jPZM`{WB};w zyADsPk+t+z97XI~xC8{ScMy^J`Q&|ZA*@-Hgtoso!T;EA392{0hpZ?aI7`QpT)y1| z45^bMFEw?LDHgO0zfA=-9`cw-lJl;&+_Af@bbQrF58H2EP5t{7SL=3Nq zMyG2%$z7vTaVt#)`18OgcqkGBmT3CS&Sj3Q;58jba$5Z5=T5n0|5_N38^=2~G&<`G_j^S_5g);XpI-m`4 z6Uje5)Q~m0eyC~l{kA4&wWxZ@;%U>itv0%7Peld>TYNAM!21)b5y3$9{ujp_c>fr zYATn`Wj&;3V)-c8DUi;6HJw+ao{PWwScq;!j|SJU;i$c6I21ikv54Nf9-6$8gL@jS z5PQ%hXu+KKTfNx~F0@Od`2h!zl1>znIrSB;60{%sQyYnUcDgW2+8+8n^#|mkTI|vx zB{KilPSmUNPckG0%z^UP@!0w^@k~r(EFTWF=TUKnk}`wILSo ze*q6H`je}`odg_LZ^5Sx-GcWOW#FxZ5YOLM#GKJJ;r@Mp$7e?CfPP2@?8@DNt7^}{ z+S=q{%wtIvEvP}ajqheNvOkh%OIwIz?v|A@)oQ<>u0Ir z%EMM&Tn1dm&BtQWRw4Pl`-y#jE+pot-vni&&*SspVOXTO6t|dFhQ^JoCR)?xp^rQN zMCdT0^xKpvfPQbg$`-r!KOEm&~? 
zf$iJ0pk2N_Do+>>2dhtk&&wMjyuF)8>QWiXY*2*$2RQy%N=| z(t%tmv*1DvQ)*IBE*xf)NtGU1h)N;j2{fM$crh=6GL1$3>MGST+dj8|G%gc9DUT0!QNWPH7TBcV!4JE)SG6 zJ>e3+0_eB14xf#e;M;g!c(LX-aOz_h*mbHDBt%aq7VpWy#4S?93K0p*-*|#eZi9&X zA0`mdVn?h?l>~m1wqlWEyfNjze6Zkk5%f$ui9OVt1|JzN!Ly#Ff~xEF(9NgavTWoK zsPoi-u&6UfyQK1nyEa>3$~IegdHzwDe9aVAw#yL198VJRUbkW4&^(}K(N1h0*GMeL zl4Y*bxMGBnA{un>7hFqeCvLKEC$xFgN?dqdKww7ts90(^ez3g-TD7^N6E?Nnmw^LO zzH9`MLOmd|*X;r`yzgN7Pb#s=&k(m&Jqv|&o+BDwKY^XIH-N48OX2b42{19y7!2KH z0PY89iW<%}j5ea?|h<%Oh?c@#CEb^$#?z90Qk zD~HY$45t+rsgMIRDk#CcdDM*(0Xi19n7W$dNGq(qj8_S@(P@PQTz``<@IIXcbXH|> zrIiBk)Van4)wTehJ2R2+-);ssIR6Pv4a>o?rE+j&O$Pq%DF^%IZVH|}42Sn>6W~fW zS?v6EdBQutk|=rRg5%k~@Jc`zn1-bSd9N1)`_&A*vsnr0tZ%h++o2BX=on0a^BecZ z{(5AI1rf2{8Q8^_EVNK_B|d>}fd@x?;67FCM^>14gK*DX=o$A9q`A=!v7aWOlW{Dp zY4sjrx=|y_i77(iH%nlUe<;}Uq8(65T)4(!(`kH(c_eh0ugdKsuwt(58h{f` z&2a6hMQCpobDg$J0jez>hCe+ek2#Abph=IHqkdBe*r~r1xQpVz;SWVn?f~?E=J277(NCClH2p_ z;aSX*%2n1udTX8_i~E~N-l&b_(2`eZn1M7}BLAFdIH2xmoSgOiBVb>Sz8QGuk5LAF)R-A#hJ2Ch`>L7m1 zeVrwKIhF8G$^{+zH{gpyGVrbcY~oDW0&L=)RIv4|0^F6cflv!u4ZP=F#V?Om#M4)% zfseoL2W^(E+(MZ|Y_N_m+-1tL73w6`>|Ll>eKa|_I>FIg(4XjErCK;wa9!7}GHZFV zUD;aQD*0u;HCt{e*@03uJAF5KyT(AiZR>lRYBM>ln!NRQt2<7e73i7eSGNh>Y*zFS zt}d<^QWLt{RdD*ij_RXx0&6I{LR%4Ey86~{QPrZ1$g0d!`vvPSFRO8_n9pBpoKbDI zdSunR&gWHuX^X0C+!OhyJ6~5_ym7PgWx*o>s`n``TX(<8H1-`o@!Imr5Rb4*bYXhc zvt7@swmT;B=bQKl`bSKydR;i8x-_=F%IRYz|H^dznnkX+_%|ur>fQQpsvlMB+rr~5 z)ssC3S2sLeU%6$|K*5qw^{NSIu9gSw8gVGh35W)8^W&OK)rQT<$vDEa+E1K<-TKq=zPTH$}&5 z0}P3}cAUN?1iy+ahehQI2O zTg{3&ysA6d8P!vpkl?J!WPSsU39=H5s@A{w#ctJw)m70__BDaNq}@DSOFJX|6M~ne zJNVAyeyv^`(zixiTdMj;aANhoOSN#~Y zr=8~Q-8c$4hb8mE;@{HG%H?@)_dTN98(yH78*Hct`7h|y$}D=;?M(W}iw>$^^KzIY zT13Tny7NZwUqd~5Z$iJ)n$HWA!%5eu3X*CmptP>7KoM(Z(<6%B@bnL0a8AQfN>r49 z8jaVId0`JxnZ^dF7O;UlpqL3a1ni=_!X2SsNglalX)>BIMhE#97$E7O^{CQq6)C+- zhCH9Mk$PCWjJYQ@kV?Ngj69r=p}H7(o{7vuWcb9EqWWy6BXmZfIltx7!kaFXY=kD9 zZZ?XVgdIkQE9-c>TUOZ)I2_K;OE9uYyvVh+o^aJ>KPG2u<}%#IicYp|oTbO_XlC0s z78}^=oJ!%Fb~@PRJy~QkF@1-PLeo-POP?R&3i%cH$*8$)y9_M>8+Yvn%l#OnmLZ&M1z ztJg)8U@ld3K^4t1JjwGbR3qi*IP=CDI`bM^X`Z@UChayk1$B%$P5H{6r~UT6CR<`x z(reUR>9?C2$QOyW)EK7%-tgp^bkzEr^h2u$yzJ@=DCE8cZ8hGR@>gDjSgx1o>+6s5 zw!FDvS9q zE4yPq@yP8Ou}@H~SGi2>Y3_v@kIOS_Ei^aOI@~F?QY8A=G+vM3g^j#uJ$18_RjpzK zFMzk(3VobP7YmYYcBW)oC9iU~$-j2U`T{OvvtwX`bsxNy*SSR5COyv4x<6N!_hhcQ zjh*HYo4NI+R$eW~tRD71Y5n%zZL31-U~5pAVho7EqI+147Jl~xh*Gp(ZKj#+CR z*RVksPCLr)OSD_7er$-8l#!EE_m7d&0ZY!7;%)3g8MZD z1cTP+7_|QhgU&x=(ES`kzt1q}{WJzPOP?dp9NTDdB=f=zII=xfZbUylP=+hT-Yvt{XBlyHq8WO9i-v5LF-N|e;&BX* zO*jfs;`rF`;5fD+YamCzXH|TgrRpqGj&jd{UYq{|?tf19?-KZbvl7ghRb%qys`+}w zbjR143B(|d0u!xZu}~rcX1xu`QaA5yP)x(3w?Q+Zme^VL?vU7Vdm9ovthXVt!+RSN zJIP^6k!HwCaM+&+genVg`U=D2B`jxFbsU?+f*ciLq*xRi9UeV}SqJfAwkiur21SR6 zX9+{XgX0-5B;zG2CQ2L~KO-uR89*@u62y@)!HhUC;-bl-IcmZg(?YseNxXQPa8{(K z8@CQygT>>h#))GSB>w-B4d%u5W7zjKGrMiR(SMC8py$uOz9C{lP(m3`uddW7pcZbAo zNN+=8=hWMf*bVJ%^pdSbB8kI3m&D=!B#F-dEQv0h&$85r&m__HrzKH~HS$}suEKKr zTg;gZ#Sp3g-^!`UNTgbl*-+F5JiJnUM$LWzziKa60Np@5ATl*wqmr9g! 
z)c5%5{s})vf67mfAMw-kYkvOMDM^X-Z*!0tOYf)osmc(@tCv8C36o^Um``_1;P|ms z{G7&)bQr zs}A(o`t%euzC8tvUr#~f&lEJwkN}Rf6|)KCNLw?TaU5wIW;326&1W_fIMTLkdDcXZ z42inw@GbC)f9FU&^mjcAKgpaw0zZ!9^HoHZKg+Xv^e2u#>-mvyNuVSJ8xsME3Ng?n zl^ugwQrR(JC6yh6@;{Xw6C=q$CODFTOjINTnQ%x3PV61{W%&Ctof?c`cZ|O5{9gMz z`TILv5|7<-Iq7?HDf~n(Cx0rJq94g+(ARSLU#BZ2*1ye9W-QmAmdo$O`pZ5(lZAsn z&BDs8kWVwJB1_DXi;syHMY0uHp&Ys3m>JRW-ACA!v7GX`W!UGI;s3Fm`j;hB8b&as zVQ924Vx}l|O3z8h6prXC>KizPh@xPR41TSJfPKeG5k)%xd&61wO3*@=FrWYd`k zf4@veX5U8il?;Cy8~vj-{?Ai>h3zM&MrQTzr}+xmcc!ua z(KM4d0bilf_zTTvmBhDU*!+9{3GgTU^MB<3TP2YCXRrS|(XsuJ_lX?;ucFiAz4z63 zd#Ap=Fn?IEd?`zkwxmWD_avgw-n6grgK8%V8OrMA7F~7ZaMoA;BO`Q n9`*aS({5q>ky8nz@E%B0KZW$Ej{XZsUHZyCmn0{B`ejzF&X#^D3;+o5-(ZFMRyCCz#LlbGo9xG8%QA)e?1{s97U`4w~EOK8%$PYlA9dB z$i_EiGqVPZ>FF@}E>RgtBcQMr*IY)81|HMApOTj+?`JaQaChBwTU18M1SqF8S&o^X zC6RmWtjUJV1}y*AZn-%*mD!a=Y{iYL$?KSnIg+?Kn?z+KEWpNJVCLrA#H`W4XnNzM z)+$jMVM~~;Tob^y>e|*mpX|qC%X0hJM2^Y*EG8UlUS`bXk`V>DLvHd*7CzCubMKfK UfFPBbfdR}oI6+KmG7oDY0H}ONlK=n! From 7b26d6699a6bd444ce53cfc86493465b0112a4e6 Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Thu, 13 Jul 2023 12:59:44 -0500 Subject: [PATCH 092/182] Adjust location of KerasTensor. --- .../Keras/Engine/KerasTensor.cs | 64 ++++++++ src/TensorFlowNET.Core/Tensors/KerasTensor.cs | 53 ------- .../Tensors/Tensor.Conversions.cs | 17 +- .../Tensors/Tensor.Keras.cs | 27 ++++ src/TensorFlowNET.Core/Tensors/Tensor.cs | 5 - src/TensorFlowNET.Keras/Models/ModelsApi.cs | 25 ++- .../Saving/SavedModel/load.cs | 146 +++++++++--------- 7 files changed, 173 insertions(+), 164 deletions(-) create mode 100644 src/TensorFlowNET.Core/Keras/Engine/KerasTensor.cs delete mode 100644 src/TensorFlowNET.Core/Tensors/KerasTensor.cs create mode 100644 src/TensorFlowNET.Core/Tensors/Tensor.Keras.cs diff --git a/src/TensorFlowNET.Core/Keras/Engine/KerasTensor.cs b/src/TensorFlowNET.Core/Keras/Engine/KerasTensor.cs new file mode 100644 index 000000000..9287284f7 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/Engine/KerasTensor.cs @@ -0,0 +1,64 @@ +namespace Tensorflow.Keras.Engine; + +/// +/// A representation of a Keras in/output during Functional API construction. +/// +public class KerasTensor +{ + private Tensors _original_tensors; + public Tensors original_tensors + { + get => _original_tensors; + set => _original_tensors = value; + } + + private Shape _inferred_value; + public Shape inferred_value => _inferred_value; + + private string _name; + private TensorSpec _type_spec; + public Shape shape => _type_spec.shape; + public TF_DataType dtype => _type_spec.dtype; + + public KerasTensor(TensorSpec type_spec, Shape inferred_value = null, string name = null) + { + _type_spec = type_spec; + _inferred_value = inferred_value; + _name = name; + } + + public static KerasTensor from_tensor(Tensor tensor) + { + var type_spec = tensor.ToTensorSpec(); + var kt = new KerasTensor(type_spec, name: tensor.name); + kt.original_tensors = tensor; + return kt; + } + + public override string ToString() + => _original_tensors.Length switch + { + > 1 => "[" + string.Join(", ", _original_tensors.Select(x => $"KerasTensor: shape={x.shape} dtype={x.dtype}")) + "]", + 1 => $"KerasTensor: shape={_original_tensors.shape} {GetInferredValueString()} dtype={_original_tensors.dtype}", + _ => _original_tensors.ToString(), + }; + + private string GetInferredValueString() + => _inferred_value == null ? 
"" : ""; + + public static implicit operator Tensors(KerasTensor kt) + => kt._original_tensors; + + public static implicit operator Tensor(KerasTensor kt) + { + Tensor tensor = kt._original_tensors; + tensor.IsFromKerasTensor = true; + return tensor; + } + + public static implicit operator KerasTensor(Tensor tensor) + => from_tensor(tensor); + + public static implicit operator KerasTensor(Tensors tensors) + => from_tensor(tensors.First()); +} diff --git a/src/TensorFlowNET.Core/Tensors/KerasTensor.cs b/src/TensorFlowNET.Core/Tensors/KerasTensor.cs deleted file mode 100644 index 3204b4ac0..000000000 --- a/src/TensorFlowNET.Core/Tensors/KerasTensor.cs +++ /dev/null @@ -1,53 +0,0 @@ -namespace Tensorflow.Keras.Engine; - -/// -/// A representation of a Keras in/output during Functional API construction. -/// -public class KerasTensor -{ - private Tensors _inferred_value; - public Tensors inferred_value - { - get => _inferred_value; - set => _inferred_value = value; - } - - private string _name; - private TensorSpec _type_spec; - public Shape shape => _type_spec.shape; - public TF_DataType dtype => _type_spec.dtype; - - public KerasTensor(TensorSpec type_spec, string name = null) - { - _type_spec = type_spec; - _name = name; - } - - public static KerasTensor from_tensor(Tensor tensor) - { - var type_spec = tensor.ToTensorSpec(); - var kt = new KerasTensor(type_spec, name: tensor.name); - kt.inferred_value = tensor; - return kt; - } - - public override string ToString() - => _inferred_value.Length switch - { - > 1 => "[" + string.Join(", ", _inferred_value.Select(x => $"")) + "]", - 1 => $"", - _ => _inferred_value.ToString(), - }; - - public static implicit operator Tensors(KerasTensor kt) - => kt._inferred_value; - - public static implicit operator Tensor(KerasTensor kt) - => kt._inferred_value; - - public static implicit operator KerasTensor(Tensor tensor) - => from_tensor(tensor); - - public static implicit operator KerasTensor(Tensors tensors) - => from_tensor(tensors.First()); -} diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Conversions.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Conversions.cs index 18bdc1aaf..fdd62aeed 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Conversions.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Conversions.cs @@ -14,19 +14,10 @@ You may obtain a copy of the License at limitations under the License. ******************************************************************************/ -using Tensorflow.NumPy; -using System; -using System.Diagnostics.CodeAnalysis; -using System.Text; -using Tensorflow.Framework.Models; -using static Tensorflow.Binding; +namespace Tensorflow; -namespace Tensorflow +public partial class Tensor { - [SuppressMessage("ReSharper", "InvokeAsExtensionMethod")] - public partial class Tensor - { - public TensorSpec ToTensorSpec() - => new TensorSpec(shape, dtype, name); - } + public TensorSpec ToTensorSpec() + => new TensorSpec(shape, dtype, name); } \ No newline at end of file diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Keras.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Keras.cs new file mode 100644 index 000000000..ca946ca48 --- /dev/null +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Keras.cs @@ -0,0 +1,27 @@ +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +namespace Tensorflow; + +public partial class Tensor +{ + public bool IsFromKerasTensor { get; set; } + + /// + /// Keras History: (Layer, (node_index, tensor_index)) + /// + public KerasHistory KerasHistory { get; set; } +} \ No newline at end of file diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.cs b/src/TensorFlowNET.Core/Tensors/Tensor.cs index c0e5d4357..65e1c8576 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.cs @@ -146,11 +146,6 @@ public int[] _shape_tuple() return rank < 0 ? null : shape.dims.Select(x => (int)x).ToArray(); } - /// - /// Keras History: (Layer, (node_index, tensor_index)) - /// - public KerasHistory KerasHistory { get; set; } - /// /// Updates the shape of this tensor. /// diff --git a/src/TensorFlowNET.Keras/Models/ModelsApi.cs b/src/TensorFlowNET.Keras/Models/ModelsApi.cs index 44dca58d0..2605c41e3 100644 --- a/src/TensorFlowNET.Keras/Models/ModelsApi.cs +++ b/src/TensorFlowNET.Keras/Models/ModelsApi.cs @@ -1,22 +1,15 @@ -using System; -using System.Collections.Generic; -using System.IO; -using System.Text; -using Tensorflow.Keras.Engine; -using Tensorflow.Keras.Saving; +using Tensorflow.Keras.Saving; using Tensorflow.Keras.Saving.SavedModel; -using ThirdParty.Tensorflow.Python.Keras.Protobuf; -namespace Tensorflow.Keras.Models +namespace Tensorflow.Keras.Models; + +public class ModelsApi: IModelsApi { - public class ModelsApi: IModelsApi - { - public Functional from_config(FunctionalConfig config) - => Functional.from_config(config); + public Functional from_config(FunctionalConfig config) + => Functional.from_config(config); - public IModel load_model(string filepath, bool compile = true, LoadOptions? options = null) - { - return KerasLoadModelUtils.load_model(filepath, compile: compile, options: options) as Model; - } + public IModel load_model(string filepath, bool compile = true, LoadOptions? options = null) + { + return KerasLoadModelUtils.load_model(filepath, compile: compile, options: options) as Model; } } diff --git a/src/TensorFlowNET.Keras/Saving/SavedModel/load.cs b/src/TensorFlowNET.Keras/Saving/SavedModel/load.cs index aa763fc2e..091dbb810 100644 --- a/src/TensorFlowNET.Keras/Saving/SavedModel/load.cs +++ b/src/TensorFlowNET.Keras/Saving/SavedModel/load.cs @@ -1,97 +1,89 @@ -using Google.Protobuf; -using System; -using System.Collections.Generic; -using System.IO; -using System.Text; -using Tensorflow.Keras.Engine; +using System.IO; using Tensorflow.Train; using ThirdParty.Tensorflow.Python.Keras.Protobuf; -using static Tensorflow.Binding; -using static Tensorflow.KerasApi; -namespace Tensorflow.Keras.Saving.SavedModel +namespace Tensorflow.Keras.Saving.SavedModel; + +public class KerasLoadModelUtils { - public class KerasLoadModelUtils + /// + /// Corresponding to keras/saving/save.py/load_model + /// + /// + /// + /// + /// + /// + public static Trackable load_model(string filepath, IDictionary? custom_objects = null, + bool compile = true, LoadOptions? 
options = null) { - /// - /// Corresponding to keras/saving/save.py/load_model - /// - /// - /// - /// - /// - /// - public static Trackable load_model(string filepath, IDictionary? custom_objects = null, - bool compile = true, LoadOptions? options = null) + using var savingScope = SharedObjectSavingScope.Enter(); + + using var ctx = LoadContext.load_context(options); + + if (!File.Exists(filepath) && !Directory.Exists(filepath)) { - using (SharedObjectSavingScope.Enter()) - { - using (LoadContext.load_context(options)) - { - if (!File.Exists(filepath) && !Directory.Exists(filepath)) - { - throw new IOException($"No file or directory found at {filepath}."); - } - if (Directory.Exists(filepath)) - { - return load(filepath, compile, options); - } - else - { - throw new NotImplementedException("Model load of h5 format has not been supported. Please submit an issue to https://github.com/SciSharp/TensorFlow.NET/issues if it's needed."); - } - } - } + throw new IOException($"No file or directory found at {filepath}."); } - private static Trackable load(string path, bool compile = true, LoadOptions? options = null) + if (Directory.Exists(filepath)) + { + return load(filepath, compile, options); + } + else { - SavedMetadata metadata = new SavedMetadata(); - var meta_graph_def = Loader.parse_saved_model(path).MetaGraphs[0]; - var object_graph_def = meta_graph_def.ObjectGraphDef; - string path_to_metadata_pb = Path.Combine(path, Constants.SAVED_METADATA_PATH); - if (File.Exists(path_to_metadata_pb)) - { - metadata.MergeFrom(new FileStream(path_to_metadata_pb, FileMode.Open, FileAccess.Read)); - } - else - { - throw new NotImplementedException("SavedModel saved prior to TF 2.5 detected when loading Keras model, please" + - " use higher version or submit an issue to https://github.com/SciSharp/TensorFlow.NET/issues. to let us know you need it."); - } + throw new NotImplementedException("Model load of h5 format has not been supported. Please submit an issue to https://github.com/SciSharp/TensorFlow.NET/issues if it's needed."); + } + } - if (metadata.Nodes is null || metadata.Nodes.Count == 0) - { - return Loader.load(path, options: options) as Model; - } + private static Trackable load(string path, bool compile = true, LoadOptions? options = null) + { + SavedMetadata metadata; + var meta_graph_def = Loader.parse_saved_model(path).MetaGraphs[0]; + var object_graph_def = meta_graph_def.ObjectGraphDef; + string path_to_metadata_pb = Path.Combine(path, Constants.SAVED_METADATA_PATH); + if (File.Exists(path_to_metadata_pb)) + { + using var stream = new FileStream(path_to_metadata_pb, FileMode.Open, FileAccess.Read); + metadata = SavedMetadata.Parser.ParseFrom(stream); + } + else + { + throw new NotImplementedException("SavedModel saved prior to TF 2.5 detected when loading Keras model, please" + + " use higher version or submit an issue to https://github.com/SciSharp/TensorFlow.NET/issues. 
to let us know you need it."); + } - var keras_loader = new KerasObjectLoader(metadata, object_graph_def); - keras_loader.load_layers(compile: compile); + if (metadata.Nodes is null || metadata.Nodes.Count == 0) + { + return Loader.load(path, options: options) as Model; + } - Dictionary)> nodes_to_load = new(); - nodes_to_load["root"] = (null, null); - foreach(var item in keras_loader.LoadedNodes) - { - nodes_to_load[keras_loader.get_path(item.Key)] = item.Value; - } - var loaded = Loader.load_partial(path, nodes_to_load, options); + var keras_loader = new KerasObjectLoader(metadata, object_graph_def); + keras_loader.load_layers(compile: compile); - keras_loader.finalize_objects(); - keras_loader.del_tracking(); + Dictionary)> nodes_to_load = new(); + nodes_to_load["root"] = (null, null); + foreach(var item in keras_loader.LoadedNodes) + { + nodes_to_load[keras_loader.get_path(item.Key)] = item.Value; + } + var loaded = Loader.load_partial(path, nodes_to_load, options); - var model = loaded["root"]; + keras_loader.finalize_objects(); + keras_loader.del_tracking(); - if(model is Model && compile) - { - // TODO(Rinne): implement it. - } + var model = loaded["root"]; - if (!tf.Context.executing_eagerly()) - { - // TODO(Rinne): implement it. - } + if (model is Model && compile) + { + // TODO(Rinne): implement it. + } - return model; + if (!tf.Context.executing_eagerly()) + { + // TODO(Rinne): implement it. } + + return model; } } From 03b44c3b502f38509eff6453a0b40c70d114be76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CWanglongzhi2001=E2=80=9D?= <“583087864@qq.com”> Date: Fri, 14 Jul 2023 18:39:58 +0800 Subject: [PATCH 093/182] ignore the LSTMLoad test --- test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs b/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs index 299337cde..cb570fc0c 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs @@ -81,6 +81,7 @@ public void ModelWithSelfDefinedModule() model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size, num_epochs); } + [Ignore] [TestMethod] public void LSTMLoad() { From 3bef87aefcb84379af5e838ed2dcb8cdc897b4a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CWanglongzhi2001=E2=80=9D?= <“583087864@qq.com”> Date: Fri, 14 Jul 2023 23:36:12 +0800 Subject: [PATCH 094/182] fix: make the initialization of the layer's name correct --- .../Utils/generic_utils.cs | 14 +++++--- .../InitLayerNameTest.cs | 33 +++++++++++++++++++ 2 files changed, 42 insertions(+), 5 deletions(-) create mode 100644 test/TensorFlowNET.Keras.UnitTest/InitLayerNameTest.cs diff --git a/src/TensorFlowNET.Keras/Utils/generic_utils.cs b/src/TensorFlowNET.Keras/Utils/generic_utils.cs index 6a59fb880..5402f4995 100644 --- a/src/TensorFlowNET.Keras/Utils/generic_utils.cs +++ b/src/TensorFlowNET.Keras/Utils/generic_utils.cs @@ -29,6 +29,7 @@ limitations under the License. 
using Tensorflow.Keras.Layers; using Tensorflow.Keras.Saving; using Tensorflow.Train; +using System.Text.RegularExpressions; namespace Tensorflow.Keras.Utils { @@ -126,12 +127,15 @@ public static FunctionalConfig deserialize_model_config(JToken json) public static string to_snake_case(string name) { - return string.Concat(name.Select((x, i) => + string intermediate = Regex.Replace(name, "(.)([A-Z][a-z0-9]+)", "$1_$2"); + string insecure = Regex.Replace(intermediate, "([a-z])([A-Z])", "$1_$2").ToLower(); + + if (insecure[0] != '_') { - return i > 0 && char.IsUpper(x) && !Char.IsDigit(name[i - 1]) ? - "_" + x.ToString() : - x.ToString(); - })).ToLower(); + return insecure; + } + + return "private" + insecure; } /// diff --git a/test/TensorFlowNET.Keras.UnitTest/InitLayerNameTest.cs b/test/TensorFlowNET.Keras.UnitTest/InitLayerNameTest.cs new file mode 100644 index 000000000..256eb69c1 --- /dev/null +++ b/test/TensorFlowNET.Keras.UnitTest/InitLayerNameTest.cs @@ -0,0 +1,33 @@ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Tensorflow.Keras.Layers; +using static Tensorflow.Binding; +using static Tensorflow.KerasApi; + +namespace Tensorflow.Keras.UnitTest +{ + [TestClass] + public class InitLayerNameTest + { + [TestMethod] + public void RNNLayerNameTest() + { + var simpleRnnCell = keras.layers.SimpleRNNCell(1); + Assert.AreEqual("simple_rnn_cell", simpleRnnCell.Name); + var simpleRnn = keras.layers.SimpleRNN(2); + Assert.AreEqual("simple_rnn", simpleRnn.Name); + var lstmCell = keras.layers.LSTMCell(2); + Assert.AreEqual("lstm_cell", lstmCell.Name); + var lstm = keras.layers.LSTM(3); + Assert.AreEqual("lstm", lstm.Name); + } + + [TestMethod] + public void ConvLayerNameTest() + { + var conv2d = keras.layers.Conv2D(8, activation: "linear"); + Assert.AreEqual("conv2d", conv2d.Name); + var conv2dTranspose = keras.layers.Conv2DTranspose(8); + Assert.AreEqual("conv2d_transpose", conv2dTranspose.Name); + } + } +} From 6ec39ba3cbfacb26096903a628db88ece042bf16 Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Sun, 16 Jul 2023 21:17:40 -0500 Subject: [PATCH 095/182] Fix inferred_value of KerasTensor. 
#1142 --- src/TensorFlowNET.Core/APIs/tf.reshape.cs | 2 +- src/TensorFlowNET.Core/APIs/tf.tile.cs | 2 +- src/TensorFlowNET.Core/GlobalUsing.cs | 3 +- .../Keras/Engine/KerasTensor.cs | 19 +++++++++--- .../Operations/array_ops.cs | 29 +++++++++++++++++-- src/TensorFlowNET.Core/Tensors/shape_utils.cs | 27 +++++++++++++++++ src/TensorFlowNET.Core/Tensors/tf.constant.cs | 3 ++ src/TensorFlowNET.Core/ops.cs | 11 +++++-- .../Tensorflow.Keras.csproj | 2 +- .../Tensorflow.Binding.UnitTest.csproj | 4 +-- 10 files changed, 88 insertions(+), 14 deletions(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.reshape.cs b/src/TensorFlowNET.Core/APIs/tf.reshape.cs index 5da7b795f..102a81323 100644 --- a/src/TensorFlowNET.Core/APIs/tf.reshape.cs +++ b/src/TensorFlowNET.Core/APIs/tf.reshape.cs @@ -31,6 +31,6 @@ public Tensor reshape(Tensor tensor, public Tensor reshape(Tensor tensor, object[] shape, string name = null) - => gen_array_ops.reshape(tensor, ops.convert_to_tensor(shape), name); + => array_ops.reshape(tensor, shape, name); } } diff --git a/src/TensorFlowNET.Core/APIs/tf.tile.cs b/src/TensorFlowNET.Core/APIs/tf.tile.cs index 65975ac83..1220230d6 100644 --- a/src/TensorFlowNET.Core/APIs/tf.tile.cs +++ b/src/TensorFlowNET.Core/APIs/tf.tile.cs @@ -23,7 +23,7 @@ public Tensor tile(Tensor input, Tensor multiples, string name = null) => gen_array_ops.tile(input, multiples, name); public Tensor tile(Tensor input, object[] multiples, string name = null) - => gen_array_ops.tile(input, ops.convert_to_tensor(multiples), name); + => array_ops.tile(input, multiples, name); public Tensor tile(Tensor input, Shape multiples, string name = null) { diff --git a/src/TensorFlowNET.Core/GlobalUsing.cs b/src/TensorFlowNET.Core/GlobalUsing.cs index 209bc291f..7e02c9083 100644 --- a/src/TensorFlowNET.Core/GlobalUsing.cs +++ b/src/TensorFlowNET.Core/GlobalUsing.cs @@ -5,4 +5,5 @@ global using System.Data; global using System.Linq; global using Tensorflow.Keras.Engine; -global using Tensorflow.Framework.Models; \ No newline at end of file +global using Tensorflow.Framework.Models; +global using static Tensorflow.Binding; \ No newline at end of file diff --git a/src/TensorFlowNET.Core/Keras/Engine/KerasTensor.cs b/src/TensorFlowNET.Core/Keras/Engine/KerasTensor.cs index 9287284f7..5a264b631 100644 --- a/src/TensorFlowNET.Core/Keras/Engine/KerasTensor.cs +++ b/src/TensorFlowNET.Core/Keras/Engine/KerasTensor.cs @@ -30,21 +30,32 @@ public KerasTensor(TensorSpec type_spec, Shape inferred_value = null, string nam public static KerasTensor from_tensor(Tensor tensor) { var type_spec = tensor.ToTensorSpec(); - var kt = new KerasTensor(type_spec, name: tensor.name); + Shape? 
inferred_value = default; + if (tensor.dtype == TF_DataType.TF_INT32 && tensor.rank < 2) + { + inferred_value = tf.ones(tensor).shape; + } + var kt = new KerasTensor(type_spec, inferred_value: inferred_value, name: tensor.name); kt.original_tensors = tensor; return kt; } + public KerasTensor this[int idx] + => _original_tensors.First()[idx]; + + public KerasTensor this[params Slice[] slices] + => _original_tensors.First()[slices]; + public override string ToString() => _original_tensors.Length switch { - > 1 => "[" + string.Join(", ", _original_tensors.Select(x => $"KerasTensor: shape={x.shape} dtype={x.dtype}")) + "]", - 1 => $"KerasTensor: shape={_original_tensors.shape} {GetInferredValueString()} dtype={_original_tensors.dtype}", + > 1 => "[" + string.Join(", ", _original_tensors.Select(x => $"KerasTensor: shape={x.shape} dtype={x.dtype.as_numpy_name()}{GetInferredValueString()}")) + "]", + 1 => $"KerasTensor: shape={_original_tensors.shape} dtype={_original_tensors.dtype.as_numpy_name()}{GetInferredValueString()}", _ => _original_tensors.ToString(), }; private string GetInferredValueString() - => _inferred_value == null ? "" : ""; + => _inferred_value == null ? "" : $" inferred_value={_inferred_value}"; public static implicit operator Tensors(KerasTensor kt) => kt._original_tensors; diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index 02bf0e868..9d4647fac 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -137,7 +137,7 @@ public static Tensor zeros(Tensors shape, TF_DataType dtype = TF_DataType.TF_FLO if(shape.Length > 1) { shapeTensor = ops.convert_to_tensor(shape, dtypes.int32); - if(shapeTensor.ndim > 1) + if (shapeTensor.ndim > 1) { shapeTensor = array_ops.reshape(shapeTensor, new Shape(-1)); } @@ -304,6 +304,10 @@ public static Tensor _autopacking_helper(IEnumerable list_or_tuple, TF_D { elems_as_tensors.Add(tensor); } + else if (elem is KerasTensor kt) + { + elems_as_tensors.Add(kt); + } else { var elem_tensor = constant_op.constant(elem, dtype: dtype, name: i.ToString()); @@ -404,7 +408,10 @@ public static Tensor reshape(Tensor tensor, Shape shape, string name = null) => gen_array_ops.reshape(tensor, shape, name: name); public static Tensor reshape(Tensor tensor, object[] shape, string name = null) - => gen_array_ops.reshape(tensor, ops.convert_to_tensor(shape), name: name); + { + var dims = shape_utils.from_object_array(shape); + return gen_array_ops.reshape(tensor, dims, name: name); + } private static Tensor ones_like_impl(T tensor, TF_DataType dtype, string name, bool optimize = true) { @@ -425,6 +432,10 @@ public static Tensor ones(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT return tf_with(ops.name_scope(name, "ones", new { shape }), scope => { name = scope; + if (shape._shape_tuple().Length == 0) + { + shape = reshape(shape, new Shape(-1)); + } var output = gen_array_ops.fill(shape, constant_op.constant(1.0f, dtype: dtype), name: name); return output; }); @@ -647,6 +658,20 @@ public static Tensor tile(Tensor input, Tensor multiples, string name = null) } }); + public static Tensor tile(Tensor input, object[] multiples, string name = null) + { + Shape dims = shape_utils.from_object_array(multiples); + + return tf.Context.ExecuteOp("Tile", name, new ExecuteOpArgs(input, dims) + { + GetGradientAttrs = (op) => new + { + T = op.get_attr("T"), + Tmultiples = op.get_attr("Tmultiples") + } + }); + } + public static Tensor zeros_like(Tensor 
tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true) { return tf_with(ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope => diff --git a/src/TensorFlowNET.Core/Tensors/shape_utils.cs b/src/TensorFlowNET.Core/Tensors/shape_utils.cs index 254cdad89..a77dd34ce 100644 --- a/src/TensorFlowNET.Core/Tensors/shape_utils.cs +++ b/src/TensorFlowNET.Core/Tensors/shape_utils.cs @@ -1,5 +1,6 @@ using System; using System.Linq; +using Tensorflow.Eager; using static Tensorflow.Binding; namespace Tensorflow @@ -13,5 +14,31 @@ public static Tensor static_or_dynamic_map_fn(Func fn, Tensor el throw new NotImplementedException(""); } + + public static Shape from_object_array(object[] shape) + { + var dims = shape.Select(x => + { + if (x is KerasTensor kt && kt.inferred_value != null) + { + return kt.inferred_value.as_int_list()[0]; + } + else if (x is EagerTensor et && et.dtype == TF_DataType.TF_INT32) + { + return et.ToArray()[0]; + } + else if (x is int i) + { + return i; + } + else if (x is long l) + { + return l; + } + throw new NotImplementedException(); + }).ToArray(); + + return new Shape(dims); + } } } diff --git a/src/TensorFlowNET.Core/Tensors/tf.constant.cs b/src/TensorFlowNET.Core/Tensors/tf.constant.cs index 6a62d34a5..ac26b3da3 100644 --- a/src/TensorFlowNET.Core/Tensors/tf.constant.cs +++ b/src/TensorFlowNET.Core/Tensors/tf.constant.cs @@ -46,6 +46,9 @@ public Tensor zeros(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, stri public Tensor ones(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) => array_ops.ones(shape, dtype, name); + public Tensor ones(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) + => array_ops.ones(shape, dtype, name); + public Tensor size(Tensor input, string name = null, TF_DataType out_type = TF_DataType.TF_INT32) => array_ops.size(input, diff --git a/src/TensorFlowNET.Core/ops.cs b/src/TensorFlowNET.Core/ops.cs index c624c9901..351fd18ff 100644 --- a/src/TensorFlowNET.Core/ops.cs +++ b/src/TensorFlowNET.Core/ops.cs @@ -144,11 +144,18 @@ public static Tensor convert_to_tensor(object value, } if (!graph.building_function) { - throw new RuntimeError("Attempting to capture an EagerTensor without building a function."); - // return eager_tensor.AsPlaceholder(name: name); + // throw new RuntimeError("Attempting to capture an EagerTensor without building a function."); + return eager_tensor.AsPlaceholder(name: name); } } } + else if (value is KerasTensor kt) + { + if (kt.inferred_value != null) + { + return convert_to_tensor(kt.inferred_value, dtype: kt.dtype, name: name); + } + } // graph mode Tensor ret = value switch diff --git a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj index c7fa7711c..eeb7c559f 100644 --- a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj +++ b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj @@ -141,7 +141,7 @@ Keras is an API designed for human beings, not machines. 
Keras follows best prac - + diff --git a/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj b/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj index 240960c91..7a6a7f92c 100644 --- a/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj +++ b/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj @@ -41,8 +41,8 @@ - - + + From 03472997e43ab36d447ca520907ee8dffcc03edc Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Tue, 18 Jul 2023 07:01:51 -0500 Subject: [PATCH 096/182] Fix tf.reverse. --- src/TensorFlowNET.Core/APIs/tf.array.cs | 15 +++++++++------ src/TensorFlowNET.Core/Operations/array_ops.cs | 18 +++++++++++++----- .../ManagedAPI/ArrayOpsTest.cs | 13 +++++++++++++ 3 files changed, 35 insertions(+), 11 deletions(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.array.cs b/src/TensorFlowNET.Core/APIs/tf.array.cs index ecac37eb1..4d9c3da58 100644 --- a/src/TensorFlowNET.Core/APIs/tf.array.cs +++ b/src/TensorFlowNET.Core/APIs/tf.array.cs @@ -162,14 +162,17 @@ public Tensor transpose(T1 a, Axis perm = null, string name = "transpose", b /// Reverses specific dimensions of a tensor. /// /// - /// + /// The indices of the dimensions to reverse. Must be in the range [-rank(tensor), rank(tensor)). /// /// - public Tensor reverse(Tensor tensor, int[] axis, string name = null) - => gen_array_ops.reverse(tensor, ops.convert_to_tensor(axis), name: name); - - public Tensor reverse(Tensor tensor, Tensor axis, string name = null) - => gen_array_ops.reverse(tensor, axis, name: name); + public Tensor reverse(Tensor tensor, Axis axis, string name = null) + { + if (axis.IsScalar) + { + axis = new Axis(axis.axis); + } + return array_ops.reverse(tensor, axis, name: name); + } /// /// Returns the rank of a tensor. diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index 9d4647fac..f80dcd2c4 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -413,6 +413,16 @@ public static Tensor reshape(Tensor tensor, object[] shape, string name = null) return gen_array_ops.reshape(tensor, dims, name: name); } + public static Tensor reverse(Tensor tensor, Tensor axis, string name = null) + => tf.Context.ExecuteOp("ReverseV2", name, new ExecuteOpArgs(tensor, axis) + { + GetGradientAttrs = (op) => new + { + T = op.get_attr("T"), + Tidx = op.get_attr("Tidx") + } + }); + private static Tensor ones_like_impl(T tensor, TF_DataType dtype, string name, bool optimize = true) { return tf_with(ops.name_scope(name, "ones_like", new { tensor }), scope => @@ -658,11 +668,9 @@ public static Tensor tile(Tensor input, Tensor multiples, string name = null) } }); - public static Tensor tile(Tensor input, object[] multiples, string name = null) + /*public static Tensor tile(Tensor input, Shape multiples, string name = null) { - Shape dims = shape_utils.from_object_array(multiples); - - return tf.Context.ExecuteOp("Tile", name, new ExecuteOpArgs(input, dims) + return tf.Context.ExecuteOp("Tile", name, new ExecuteOpArgs(input, multiples) { GetGradientAttrs = (op) => new { @@ -670,7 +678,7 @@ public static Tensor tile(Tensor input, object[] multiples, string name = null) Tmultiples = op.get_attr("Tmultiples") } }); - } + }*/ public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true) { diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs index 
72f598e46..675689bb1 100644 --- a/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs +++ b/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs @@ -2,6 +2,7 @@ using Tensorflow.NumPy; using Tensorflow; using static Tensorflow.Binding; +using System.Linq; namespace TensorFlowNET.UnitTest.ManagedAPI { @@ -92,5 +93,17 @@ public void TensorArray() Assert.AreEqual(ta.read(1).numpy(), 20f); Assert.AreEqual(ta.read(2).numpy(), 30f); } + + /// + /// https://www.tensorflow.org/api_docs/python/tf/reverse + /// + [TestMethod] + public void ReverseArray() + { + var a = tf.random.normal((2, 3)); + var b = tf.reverse(a, -1); + Assert.IsTrue(Equal(a[0].ToArray().Reverse().ToArray(), b[0].ToArray())); + Assert.IsTrue(Equal(a[1].ToArray().Reverse().ToArray(), b[1].ToArray())); + } } } From fa5d19dcdab55d7b81afc614f9929bc85c52cb20 Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Tue, 18 Jul 2023 07:08:39 -0500 Subject: [PATCH 097/182] fix unit test. --- src/TensorFlowNET.Core/APIs/tf.tile.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.tile.cs b/src/TensorFlowNET.Core/APIs/tf.tile.cs index 1220230d6..a3b497e8a 100644 --- a/src/TensorFlowNET.Core/APIs/tf.tile.cs +++ b/src/TensorFlowNET.Core/APIs/tf.tile.cs @@ -23,7 +23,7 @@ public Tensor tile(Tensor input, Tensor multiples, string name = null) => gen_array_ops.tile(input, multiples, name); public Tensor tile(Tensor input, object[] multiples, string name = null) - => array_ops.tile(input, multiples, name); + => array_ops.tile(input, constant_op.constant(shape_utils.from_object_array(multiples).dims), name); public Tensor tile(Tensor input, Shape multiples, string name = null) { From 0c9437afcb9cc5852abcbd31bcb85c08afef0ab7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CWanglongzhi2001=E2=80=9D?= <“583087864@qq.com”> Date: Tue, 18 Jul 2023 23:31:45 +0800 Subject: [PATCH 098/182] feat: add Bidirectional layer --- .../ArgsDefinition/Rnn/BidirectionalArgs.cs | 20 ++ .../Keras/ArgsDefinition/Rnn/LSTMArgs.cs | 5 + .../Keras/ArgsDefinition/Rnn/RNNArgs.cs | 5 + .../Keras/ArgsDefinition/Rnn/WrapperArgs.cs | 24 ++ .../Keras/Layers/ILayersApi.cs | 14 +- src/TensorFlowNET.Keras/Layers/LayersApi.cs | 14 + .../Layers/Rnn/BaseWrapper.cs | 33 +++ .../Layers/Rnn/Bidirectional.cs | 276 ++++++++++++++++++ src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs | 31 +- src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs | 11 +- .../Layers/Rnn.Test.cs | 13 +- 11 files changed, 428 insertions(+), 18 deletions(-) create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/BidirectionalArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/WrapperArgs.cs create mode 100644 src/TensorFlowNET.Keras/Layers/Rnn/BaseWrapper.cs create mode 100644 src/TensorFlowNET.Keras/Layers/Rnn/Bidirectional.cs diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/BidirectionalArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/BidirectionalArgs.cs new file mode 100644 index 000000000..d658a82e9 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/BidirectionalArgs.cs @@ -0,0 +1,20 @@ +using Newtonsoft.Json; +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.NumPy; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class BidirectionalArgs : AutoSerializeLayerArgs + { + [JsonProperty("layer")] + public ILayer Layer { get; set; } + [JsonProperty("merge_mode")] + public string? 
MergeMode { get; set; } + [JsonProperty("backward_layer")] + public ILayer BackwardLayer { get; set; } + public NDArray Weights { get; set; } + } + +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs index d816b0ff7..a6beb77e8 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs @@ -5,5 +5,10 @@ public class LSTMArgs : RNNArgs // TODO: maybe change the `RNNArgs` and implement this class. public bool UnitForgetBias { get; set; } public int Implementation { get; set; } + + public LSTMArgs Clone() + { + return (LSTMArgs)MemberwiseClone(); + } } } diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs index b84d30d3d..d0b73ba44 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs @@ -40,5 +40,10 @@ public class RNNArgs : AutoSerializeLayerArgs public bool ZeroOutputForMask { get; set; } = false; [JsonProperty("recurrent_dropout")] public float RecurrentDropout { get; set; } = .0f; + + public RNNArgs Clone() + { + return (RNNArgs)MemberwiseClone(); + } } } diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/WrapperArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/WrapperArgs.cs new file mode 100644 index 000000000..ec8e16d59 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/WrapperArgs.cs @@ -0,0 +1,24 @@ +using Newtonsoft.Json; +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Text; + + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class WrapperArgs : AutoSerializeLayerArgs + { + [JsonProperty("layer")] + public ILayer Layer { get; set; } + + public WrapperArgs(ILayer layer) + { + Layer = layer; + } + + public static implicit operator WrapperArgs(BidirectionalArgs args) + => new WrapperArgs(args.Layer); + } + +} diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs index 1670f9d1d..b8aff5fb6 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs @@ -258,7 +258,19 @@ public IRnnCell GRUCell( float dropout = 0f, float recurrent_dropout = 0f, bool reset_after = true); - + + /// + /// Bidirectional wrapper for RNNs. + /// + /// `keras.layers.RNN` instance, such as `keras.layers.LSTM` or `keras.layers.GRU` + /// automatically. 
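+        /// If `backward_layer` is not provided, the backward RNN is generated from the forward `layer` (with `go_backwards` reversed).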
+ /// + public ILayer Bidirectional( + ILayer layer, + string merge_mode = "concat", + NDArray weights = null, + ILayer backward_layer = null); + public ILayer Subtract(); } } diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index cb85bbba1..a04a9c051 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -908,6 +908,20 @@ public IRnnCell GRUCell( ResetAfter = reset_after }); + public ILayer Bidirectional( + ILayer layer, + string merge_mode = "concat", + NDArray weights = null, + ILayer backward_layer = null) + => new Bidirectional(new BidirectionalArgs + { + Layer = layer, + MergeMode = merge_mode, + Weights = weights, + BackwardLayer = backward_layer + }); + + /// /// /// diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/BaseWrapper.cs b/src/TensorFlowNET.Keras/Layers/Rnn/BaseWrapper.cs new file mode 100644 index 000000000..737f88cd4 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Rnn/BaseWrapper.cs @@ -0,0 +1,33 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Keras.Saving; + +namespace Tensorflow.Keras.Layers +{ + /// + /// Abstract wrapper base class. Wrappers take another layer and augment it in various ways. + /// Do not use this class as a layer, it is only an abstract base class. + /// Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers. + /// + public abstract class Wrapper: Layer + { + public ILayer _layer; + public Wrapper(WrapperArgs args):base(args) + { + _layer = args.Layer; + } + + public virtual void Build(KerasShapesWrapper input_shape) + { + if (!_layer.Built) + { + _layer.build(input_shape); + } + built = true; + } + + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/Bidirectional.cs b/src/TensorFlowNET.Keras/Layers/Rnn/Bidirectional.cs new file mode 100644 index 000000000..6114d9c7c --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Rnn/Bidirectional.cs @@ -0,0 +1,276 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using Tensorflow.Common.Types; +using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Keras.Saving; + +namespace Tensorflow.Keras.Layers +{ + /// + /// Bidirectional wrapper for RNNs. + /// + public class Bidirectional: Wrapper + { + BidirectionalArgs _args; + RNN _forward_layer; + RNN _backward_layer; + RNN _layer; + bool _support_masking = true; + int _num_constants = 0; + bool _return_state; + bool _stateful; + bool _return_sequences; + InputSpec _input_spec; + RNNArgs _layer_args_copy; + public Bidirectional(BidirectionalArgs args):base(args) + { + _args = args; + if (_args.Layer is not ILayer) + throw new ValueError( + "Please initialize `Bidirectional` layer with a " + + $"`tf.keras.layers.Layer` instance. Received: {_args.Layer}"); + + if (_args.BackwardLayer is not null && _args.BackwardLayer is not ILayer) + throw new ValueError( + "`backward_layer` need to be a `tf.keras.layers.Layer` " + + $"instance. Received: {_args.BackwardLayer}"); + if (!new List { "sum", "mul", "ave", "concat", null }.Contains(_args.MergeMode)) + { + throw new ValueError( + $"Invalid merge mode. Received: {_args.MergeMode}. 
" + + "Merge mode should be one of " + + "{\"sum\", \"mul\", \"ave\", \"concat\", null}" + ); + } + if (_args.Layer is RNN) + { + _layer = _args.Layer as RNN; + } + else + { + throw new ValueError( + "Bidirectional only support RNN instance such as LSTM or GRU"); + } + _return_state = _layer.Args.ReturnState; + _return_sequences = _layer.Args.ReturnSequences; + _stateful = _layer.Args.Stateful; + _layer_args_copy = _layer.Args.Clone(); + // We don't want to track `layer` since we're already tracking the two + // copies of it we actually run. + // TODO(Wanglongzhi2001), since the feature of setattr_tracking has not been implemented. + // _setattr_tracking = false; + // super().__init__(layer, **kwargs) + // _setattr_tracking = true; + + // Recreate the forward layer from the original layer config, so that it + // will not carry over any state from the layer. + var actualType = _layer.GetType(); + if (actualType == typeof(LSTM)) + { + var arg = _layer_args_copy as LSTMArgs; + _forward_layer = new LSTM(arg); + } + // TODO(Wanglongzhi2001), add GRU if case. + else + { + _forward_layer = new RNN(_layer.Cell, _layer_args_copy); + } + //_forward_layer = _recreate_layer_from_config(_layer); + if (_args.BackwardLayer is null) + { + _backward_layer = _recreate_layer_from_config(_layer, go_backwards:true); + } + else + { + _backward_layer = _args.BackwardLayer as RNN; + } + _forward_layer.Name = "forward_" + _forward_layer.Name; + _backward_layer.Name = "backward_" + _backward_layer.Name; + _verify_layer_config(); + + void force_zero_output_for_mask(RNN layer) + { + layer.Args.ZeroOutputForMask = layer.Args.ReturnSequences; + } + + force_zero_output_for_mask(_forward_layer); + force_zero_output_for_mask(_backward_layer); + + if (_args.Weights is not null) + { + var nw = len(_args.Weights); + _forward_layer.set_weights(_args.Weights[$":,{nw / 2}"]); + _backward_layer.set_weights(_args.Weights[$"{nw / 2},:"]); + } + + _input_spec = _layer.InputSpec; + } + + private void _verify_layer_config() + { + if (_forward_layer.Args.GoBackwards == _backward_layer.Args.GoBackwards) + { + throw new ValueError( + "Forward layer and backward layer should have different " + + "`go_backwards` value." 
+ + "forward_layer.go_backwards = " + + $"{_forward_layer.Args.GoBackwards}," + + "backward_layer.go_backwards = " + + $"{_backward_layer.Args.GoBackwards}"); + } + if (_forward_layer.Args.Stateful != _backward_layer.Args.Stateful) + { + throw new ValueError( + "Forward layer and backward layer are expected to have "+ + $"the same value for attribute stateful, got "+ + $"{_forward_layer.Args.Stateful} for forward layer and "+ + $"{_backward_layer.Args.Stateful} for backward layer"); + } + if (_forward_layer.Args.ReturnState != _backward_layer.Args.ReturnState) + { + throw new ValueError( + "Forward layer and backward layer are expected to have " + + $"the same value for attribute return_state, got " + + $"{_forward_layer.Args.ReturnState} for forward layer and " + + $"{_backward_layer.Args.ReturnState} for backward layer"); + } + if (_forward_layer.Args.ReturnSequences != _backward_layer.Args.ReturnSequences) + { + throw new ValueError( + "Forward layer and backward layer are expected to have " + + $"the same value for attribute return_sequences, got " + + $"{_forward_layer.Args.ReturnSequences} for forward layer and " + + $"{_backward_layer.Args.ReturnSequences} for backward layer"); + } + } + + private RNN _recreate_layer_from_config(RNN layer, bool go_backwards = false) + { + var config = layer.get_config() as RNNArgs; + var cell = layer.Cell; + if (go_backwards) + { + config.GoBackwards = !config.GoBackwards; + } + var actualType = layer.GetType(); + if (actualType == typeof(LSTM)) + { + var arg = config as LSTMArgs; + return new LSTM(arg); + } + else + { + return new RNN(cell, config); + } + } + + public override void build(KerasShapesWrapper input_shape) + { + _buildInputShape = input_shape; + tf_with(ops.name_scope(_forward_layer.Name), scope=> + { + _forward_layer.build(input_shape); + }); + tf_with(ops.name_scope(_backward_layer.Name), scope => + { + _backward_layer.build(input_shape); + }); + built = true; + } + + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) + { + // `Bidirectional.call` implements the same API as the wrapped `RNN`. + + Tensors forward_inputs; + Tensors backward_inputs; + Tensors forward_state; + Tensors backward_state; + // if isinstance(inputs, list) and len(inputs) > 1: + if (inputs.Length > 1) + { + // initial_states are keras tensors, which means they are passed + // in together with inputs as list. The initial_states need to be + // split into forward and backward section, and be feed to layers + // accordingly. + forward_inputs = new Tensors { inputs[0] }; + backward_inputs = new Tensors { inputs[0] }; + var pivot = (len(inputs) - _num_constants) / 2 + 1; + // add forward initial state + forward_inputs.Concat(new Tensors { inputs[$"1:{pivot}"] }); + if (_num_constants != 0) + // add backward initial state + backward_inputs.Concat(new Tensors { inputs[$"{pivot}:"] }); + else + { + // add backward initial state + backward_inputs.Concat(new Tensors { inputs[$"{pivot}:{-_num_constants}"] }); + // add constants for forward and backward layers + forward_inputs.Concat(new Tensors { inputs[$"{-_num_constants}:"] }); + backward_inputs.Concat(new Tensors { inputs[$"{-_num_constants}:"] }); + } + forward_state = null; + backward_state = null; + } + else if (state is not null) + { + // initial_states are not keras tensors, eg eager tensor from np + // array. 
They are only passed in from kwarg initial_state, and + // should be passed to forward/backward layer via kwarg + // initial_state as well. + forward_inputs = inputs; + backward_inputs = inputs; + var half = len(state) / 2; + forward_state = state[$":{half}"]; + backward_state = state[$"{half}:"]; + } + else + { + forward_inputs = inputs; + backward_inputs = inputs; + forward_state = null; + backward_state = null; + } + var y = _forward_layer.Apply(forward_inputs, forward_state); + var y_rev = _backward_layer.Apply(backward_inputs, backward_state); + + Tensors states = new(); + if (_return_state) + { + states = y["1:"] + y_rev["1:"]; + y = y[0]; + y_rev = y_rev[0]; + } + + if (_return_sequences) + { + int time_dim = _forward_layer.Args.TimeMajor ? 0 : 1; + y_rev = keras.backend.reverse(y_rev, time_dim); + } + Tensors output; + if (_args.MergeMode == "concat") + output = keras.backend.concatenate(new Tensors { y.Single(), y_rev.Single() }); + else if (_args.MergeMode == "sum") + output = y.Single() + y_rev.Single(); + else if (_args.MergeMode == "ave") + output = (y.Single() + y_rev.Single()) / 2; + else if (_args.MergeMode == "mul") + output = y.Single() * y_rev.Single(); + else if (_args.MergeMode is null) + output = new Tensors { y.Single(), y_rev.Single() }; + else + throw new ValueError( + "Unrecognized value for `merge_mode`. " + + $"Received: {_args.MergeMode}" + + "Expected values are [\"concat\", \"sum\", \"ave\", \"mul\"]"); + if (_return_state) + { + if (_args.MergeMode is not null) + return new Tensors { output.Single(), states.Single()}; + } + return output; + } + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs b/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs index b5d583248..c766e8d69 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs @@ -3,6 +3,7 @@ using Tensorflow.Keras.Engine; using Tensorflow.Common.Types; using Tensorflow.Common.Extensions; +using Tensorflow.Keras.Saving; namespace Tensorflow.Keras.Layers { @@ -14,15 +15,15 @@ namespace Tensorflow.Keras.Layers /// public class LSTM : RNN { - LSTMArgs args; + LSTMArgs _args; InputSpec[] _state_spec; InputSpec _input_spec; bool _could_use_gpu_kernel; - + public LSTMArgs Args { get => _args; } public LSTM(LSTMArgs args) : base(CreateCell(args), args) { - this.args = args; + _args = args; _input_spec = new InputSpec(ndim: 3); _state_spec = new[] { args.Units, args.Units }.Select(dim => new InputSpec(shape: (-1, dim))).ToArray(); _could_use_gpu_kernel = args.Activation == keras.activations.Tanh @@ -71,7 +72,7 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo var single_input = inputs.Single; var input_shape = single_input.shape; - var timesteps = args.TimeMajor ? input_shape[0] : input_shape[1]; + var timesteps = _args.TimeMajor ? 
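+            // time-major inputs are laid out (time, batch, feature), so the timestep count comes from axis 0; batch-major inputs use axis 1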
input_shape[0] : input_shape[1]; _maybe_reset_cell_dropout_mask(Cell); @@ -87,26 +88,26 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo inputs, initial_state, constants: null, - go_backwards: args.GoBackwards, + go_backwards: _args.GoBackwards, mask: mask, - unroll: args.Unroll, + unroll: _args.Unroll, input_length: ops.convert_to_tensor(timesteps), - time_major: args.TimeMajor, - zero_output_for_mask: args.ZeroOutputForMask, - return_all_outputs: args.ReturnSequences + time_major: _args.TimeMajor, + zero_output_for_mask: _args.ZeroOutputForMask, + return_all_outputs: _args.ReturnSequences ); Tensor output; - if (args.ReturnSequences) + if (_args.ReturnSequences) { - output = keras.backend.maybe_convert_to_ragged(false, outputs, (int)timesteps, args.GoBackwards); + output = keras.backend.maybe_convert_to_ragged(false, outputs, (int)timesteps, _args.GoBackwards); } else { output = last_output; } - if (args.ReturnState) + if (_args.ReturnState) { return new Tensor[] { output }.Concat(states).ToArray().ToTensors(); } @@ -115,5 +116,11 @@ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bo return output; } } + + public override IKerasConfig get_config() + { + return _args; + } + } } diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs index 0e81d20e3..c19222614 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs @@ -31,7 +31,9 @@ public class RNN : RnnBase protected IVariableV1 _kernel; protected IVariableV1 _bias; private IRnnCell _cell; - protected IRnnCell Cell + + public RNNArgs Args { get => _args; } + public IRnnCell Cell { get { @@ -570,10 +572,13 @@ protected Tensors get_initial_state(Tensors inputs) var input_shape = array_ops.shape(inputs); var batch_size = _args.TimeMajor ? 
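            // for time-major inputs the batch dimension sits on axis 1, otherwise on axis 0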
input_shape[1] : input_shape[0]; var dtype = input.dtype; - Tensors init_state = Cell.GetInitialState(null, batch_size, dtype); - return init_state; } + + public override IKerasConfig get_config() + { + return _args; + } } } diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs index 5f7bd574e..03159346a 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs @@ -5,6 +5,7 @@ using System.Text; using System.Threading.Tasks; using Tensorflow.Common.Types; +using Tensorflow.Keras.ArgsDefinition; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Layers; using Tensorflow.Keras.Saving; @@ -38,8 +39,6 @@ public void StackedRNNCell() var cells = new IRnnCell[] { tf.keras.layers.SimpleRNNCell(4), tf.keras.layers.SimpleRNNCell(5) }; var stackedRNNCell = tf.keras.layers.StackedRNNCells(cells); var (output, state) = stackedRNNCell.Apply(inputs, states); - Console.WriteLine(output); - Console.WriteLine(state.shape); Assert.AreEqual((32, 5), output.shape); Assert.AreEqual((32, 4), state[0].shape); } @@ -108,6 +107,7 @@ public void RNNForSimpleRNNCell() var inputs = tf.random.normal((32, 10, 8)); var cell = tf.keras.layers.SimpleRNNCell(10, dropout: 0.5f, recurrent_dropout: 0.5f); var rnn = tf.keras.layers.RNN(cell: cell); + var cgf = rnn.get_config(); var output = rnn.Apply(inputs); Assert.AreEqual((32, 10), output.shape); @@ -145,5 +145,14 @@ public void GRUCell() Assert.AreEqual((32, 4), output.shape); } + + [TestMethod] + public void Bidirectional() + { + var bi = tf.keras.layers.Bidirectional(keras.layers.LSTM(10, return_sequences:true)); + var inputs = tf.random.normal((32, 10, 8)); + var outputs = bi.Apply(inputs); + Assert.AreEqual((32, 10, 20), outputs.shape); + } } } From 737910df9e3eca18e094a2bffefa5516efc9ebf3 Mon Sep 17 00:00:00 2001 From: Beacontownfc <89081023+Beacontownfc@users.noreply.github.com> Date: Sat, 22 Jul 2023 14:23:08 +0800 Subject: [PATCH 099/182] Fix: model.load_weights --- src/TensorFlowNET.Keras/Saving/hdf5_format.cs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/TensorFlowNET.Keras/Saving/hdf5_format.cs b/src/TensorFlowNET.Keras/Saving/hdf5_format.cs index 8ac9fddf6..dd6609bc7 100644 --- a/src/TensorFlowNET.Keras/Saving/hdf5_format.cs +++ b/src/TensorFlowNET.Keras/Saving/hdf5_format.cs @@ -133,10 +133,8 @@ public static void load_weights_from_hdf5_group(long f, List layers) long g = H5G.open(f, name); var weight_names = load_attributes_from_hdf5_group(g, "weight_names"); foreach (var i_ in weight_names) - { - var vm = Regex.Replace(i_, "/", "$"); - vm = i_.Split('/')[0] + "/$" + vm.Substring(i_.Split('/')[0].Length + 1, i_.Length - i_.Split('/')[0].Length - 1); - (success, Array result) = Hdf5.ReadDataset(g, vm); + { + (success, Array result) = Hdf5.ReadDataset(g, i_); if (success) weight_values.Add(np.array(result)); } @@ -196,9 +194,14 @@ public static void save_weights_to_hdf5_group(long f, List layers) var tensor = val.AsTensor(); if (name.IndexOf("/") > 1) { - var crDataGroup = Hdf5.CreateOrOpenGroup(g, Hdf5Utils.NormalizedName(name.Split('/')[0])); - var _name = Regex.Replace(name.Substring(name.Split('/')[0].Length, name.Length - name.Split('/')[0].Length), "/", "$"); - WriteDataset(crDataGroup, _name, tensor); + var crDataGroup = g; + string[] name_split = name.Split('/'); + for(int i = 0; i < name_split.Length; i++) + { + if (i == name_split.Length - 1) break; + crDataGroup = 
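+                        // create or open one nested HDF5 group per '/' segment of the weight name; the final segment names the dataset written below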
Hdf5.CreateOrOpenGroup(crDataGroup, Hdf5Utils.NormalizedName(name_split[i])); + } + WriteDataset(crDataGroup, name_split[name_split.Length - 1], tensor); Hdf5.CloseGroup(crDataGroup); } else From 05dbe134f8f00fa62aa9cda2337891f4ce66c453 Mon Sep 17 00:00:00 2001 From: Beacontownfc <89081023+Beacontownfc@users.noreply.github.com> Date: Sat, 22 Jul 2023 14:32:33 +0800 Subject: [PATCH 100/182] Update hdf5_format.cs --- src/TensorFlowNET.Keras/Saving/hdf5_format.cs | 707 +++++++++--------- 1 file changed, 353 insertions(+), 354 deletions(-) diff --git a/src/TensorFlowNET.Keras/Saving/hdf5_format.cs b/src/TensorFlowNET.Keras/Saving/hdf5_format.cs index dd6609bc7..c80f653f8 100644 --- a/src/TensorFlowNET.Keras/Saving/hdf5_format.cs +++ b/src/TensorFlowNET.Keras/Saving/hdf5_format.cs @@ -1,355 +1,354 @@ -using System; -using System.Collections.Generic; -using System.Text; -using HDF.PInvoke; -using Tensorflow.NumPy; -using HDF5CSharp; -using static Tensorflow.Binding; -using static Tensorflow.KerasApi; -using System.Linq; -using System.Text.RegularExpressions; - -namespace Tensorflow.Keras.Saving -{ - public class hdf5_format - { - private static int HDF5_OBJECT_HEADER_LIMIT = 64512; - public static void load_model_from_hdf5(string filepath = "", Dictionary custom_objects = null, bool compile = false) - { - long root = Hdf5.OpenFile(filepath,true); - load_model_from_hdf5(root, custom_objects, compile); - } - public static void load_model_from_hdf5(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - //long fileId = filepath; - //try - //{ - // groupId = H5G.open(fileId, "/"); - // (bool success, string[] attrId) = Hdf5.ReadStringAttributes(groupId, "model_config", ""); - // H5G.close(groupId); - // if (success == true) { - // Console.WriteLine(attrId[0]); - // } - //} - //catch (Exception ex) - //{ - // if (filepath != -1) { - // Hdf5.CloseFile(filepath); - // } - // if (groupId != -1) { - // H5G.close(groupId); - // } - // throw new Exception(ex.ToString()); - //} - - } - public static void save_model_to_hdf5(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - - } - - /// - /// Preprocess layer weights between different Keras formats. - /// - /// - /// - /// - /// - public static List preprocess_weights_for_loading(ILayer layer, List weights, string original_keras_version = null, string original_backend = null) - { - // convert CuDNN layers - return _convert_rnn_weights(layer, weights); - } - - /// - /// Converts weights for RNN layers between native and CuDNN format. 
- /// - /// - /// - static List _convert_rnn_weights(ILayer layer, List weights) - { - var target_class = layer.GetType().Name; - return weights; - } - - public static void save_optimizer_weights_to_hdf5_group(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - - } - - public static void load_optimizer_weights_from_hdf5_group(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - - } - - public static void load_weights_from_hdf5_group(long f, List layers) - { - string original_keras_version = "2.5.0"; - string original_backend = null; - var (success, attr) = Hdf5.ReadStringAttributes(f, "keras_version", "", true); - if (success) - original_keras_version = attr.First(); - // keras version should be 2.5.0+ - var ver_major = int.Parse(original_keras_version.Split('.')[0]); - var ver_minor = int.Parse(original_keras_version.Split('.')[1]); - if (ver_major < 2 || (ver_major == 2 && ver_minor < 5)) - throw new ValueError("keras version should be 2.5.0 or later."); - - (success, attr) = Hdf5.ReadStringAttributes(f, "backend", "", true); - if (success) - original_backend = attr.First(); - - var filtered_layers = new List(); - foreach (var layer in layers) - { - var weights = _legacy_weights(layer); - if (weights.Count > 0) - filtered_layers.append(layer); - } - - string[] layer_names = load_attributes_from_hdf5_group(f, "layer_names"); - var filtered_layer_names = new List(); - foreach(var name in layer_names) - { - if (!filtered_layers.Select(x => x.Name).Contains(name)) - continue; - long g = H5G.open(f, name); - var weight_names = load_attributes_from_hdf5_group(g, "weight_names"); - if (weight_names.Count() > 0) - filtered_layer_names.Add(name); - H5G.close(g); - } - - layer_names = filtered_layer_names.ToArray(); - if (layer_names.Length != filtered_layers.Count()) - throw new ValueError("You are trying to load a weight file " + - $"containing {layer_names}" + - $" layers into a model with {filtered_layers.Count} layers."); - - var weight_value_tuples = new List<(IVariableV1, NDArray)>(); - foreach (var (k, name) in enumerate(layer_names)) - { - var weight_values = new List(); - long g = H5G.open(f, name); - var weight_names = load_attributes_from_hdf5_group(g, "weight_names"); - foreach (var i_ in weight_names) - { - (success, Array result) = Hdf5.ReadDataset(g, i_); - if (success) - weight_values.Add(np.array(result)); - } - H5G.close(g); - var layer = filtered_layers[k]; - var symbolic_weights = _legacy_weights(layer); - preprocess_weights_for_loading(layer, weight_values, original_keras_version, original_backend); - if (weight_values.Count() != symbolic_weights.Count()) - throw new ValueError($"Layer #{k} (named {layer.Name}" + - "in the current model) was found to " + - $"correspond to layer {name} in the save file." 
+ - $"However the new layer {layer.Name} expects " + - $"{symbolic_weights.Count()} weights, but the saved weights have " + - $"{weight_values.Count()} elements."); - weight_value_tuples.AddRange(zip(symbolic_weights, weight_values)); - } - - keras.backend.batch_set_value(weight_value_tuples); - } - - public static void toarrayf4(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - - } - - public static void load_weights_from_hdf5_group_by_name(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - - } - - public static void save_weights_to_hdf5_group(long f, List layers) - { - List layerName=new List(); - foreach (var layer in layers) - { - layerName.Add(layer.Name); - } - save_attributes_to_hdf5_group(f, "layer_names", layerName.ToArray()); - Hdf5.WriteAttribute(f, "backend", "tensorflow"); - Hdf5.WriteAttribute(f, "keras_version", "2.5.0"); - - foreach (var layer in layers) - { - var weights = _legacy_weights(layer); - if (weights.Count == 0) - continue; - - var weight_names = new List(); - // weight_values= keras.backend.batch_get_value(weights); - foreach (var weight in weights) - weight_names.Add(weight.Name); - - var g = Hdf5.CreateOrOpenGroup(f, Hdf5Utils.NormalizedName(layer.Name)); - save_attributes_to_hdf5_group(g, "weight_names", weight_names.ToArray()); - foreach (var (name, val) in zip(weight_names, weights)) - { - var tensor = val.AsTensor(); - if (name.IndexOf("/") > 1) - { - var crDataGroup = g; - string[] name_split = name.Split('/'); - for(int i = 0; i < name_split.Length; i++) - { - if (i == name_split.Length - 1) break; +using System; +using System.Collections.Generic; +using System.Text; +using HDF.PInvoke; +using Tensorflow.NumPy; +using HDF5CSharp; +using static Tensorflow.Binding; +using static Tensorflow.KerasApi; +using System.Linq; +using System.Text.RegularExpressions; + +namespace Tensorflow.Keras.Saving +{ + public class hdf5_format + { + private static int HDF5_OBJECT_HEADER_LIMIT = 64512; + public static void load_model_from_hdf5(string filepath = "", Dictionary custom_objects = null, bool compile = false) + { + long root = Hdf5.OpenFile(filepath,true); + load_model_from_hdf5(root, custom_objects, compile); + } + public static void load_model_from_hdf5(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + //long fileId = filepath; + //try + //{ + // groupId = H5G.open(fileId, "/"); + // (bool success, string[] attrId) = Hdf5.ReadStringAttributes(groupId, "model_config", ""); + // H5G.close(groupId); + // if (success == true) { + // Console.WriteLine(attrId[0]); + // } + //} + //catch (Exception ex) + //{ + // if (filepath != -1) { + // Hdf5.CloseFile(filepath); + // } + // if (groupId != -1) { + // H5G.close(groupId); + // } + // throw new Exception(ex.ToString()); + //} + + } + public static void save_model_to_hdf5(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + + } + + /// + /// Preprocess layer weights between different Keras formats. + /// + /// + /// + /// + /// + public static List preprocess_weights_for_loading(ILayer layer, List weights, string original_keras_version = null, string original_backend = null) + { + // convert CuDNN layers + return _convert_rnn_weights(layer, weights); + } + + /// + /// Converts weights for RNN layers between native and CuDNN format. 
+ /// + /// + /// + static List _convert_rnn_weights(ILayer layer, List weights) + { + var target_class = layer.GetType().Name; + return weights; + } + + public static void save_optimizer_weights_to_hdf5_group(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + + } + + public static void load_optimizer_weights_from_hdf5_group(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + + } + + public static void load_weights_from_hdf5_group(long f, List layers) + { + string original_keras_version = "2.5.0"; + string original_backend = null; + var (success, attr) = Hdf5.ReadStringAttributes(f, "keras_version", "", true); + if (success) + original_keras_version = attr.First(); + // keras version should be 2.5.0+ + var ver_major = int.Parse(original_keras_version.Split('.')[0]); + var ver_minor = int.Parse(original_keras_version.Split('.')[1]); + if (ver_major < 2 || (ver_major == 2 && ver_minor < 5)) + throw new ValueError("keras version should be 2.5.0 or later."); + + (success, attr) = Hdf5.ReadStringAttributes(f, "backend", "", true); + if (success) + original_backend = attr.First(); + + var filtered_layers = new List(); + foreach (var layer in layers) + { + var weights = _legacy_weights(layer); + if (weights.Count > 0) + filtered_layers.append(layer); + } + + string[] layer_names = load_attributes_from_hdf5_group(f, "layer_names"); + var filtered_layer_names = new List(); + foreach(var name in layer_names) + { + if (!filtered_layers.Select(x => x.Name).Contains(name)) + continue; + long g = H5G.open(f, name); + var weight_names = load_attributes_from_hdf5_group(g, "weight_names"); + if (weight_names.Count() > 0) + filtered_layer_names.Add(name); + H5G.close(g); + } + + layer_names = filtered_layer_names.ToArray(); + if (layer_names.Length != filtered_layers.Count()) + throw new ValueError("You are trying to load a weight file " + + $"containing {layer_names}" + + $" layers into a model with {filtered_layers.Count} layers."); + + var weight_value_tuples = new List<(IVariableV1, NDArray)>(); + foreach (var (k, name) in enumerate(layer_names)) + { + var weight_values = new List(); + long g = H5G.open(f, name); + var weight_names = load_attributes_from_hdf5_group(g, "weight_names"); + foreach (var i_ in weight_names) + { + (success, Array result) = Hdf5.ReadDataset(g, i_); + if (success) + weight_values.Add(np.array(result)); + } + H5G.close(g); + var layer = filtered_layers[k]; + var symbolic_weights = _legacy_weights(layer); + preprocess_weights_for_loading(layer, weight_values, original_keras_version, original_backend); + if (weight_values.Count() != symbolic_weights.Count()) + throw new ValueError($"Layer #{k} (named {layer.Name}" + + "in the current model) was found to " + + $"correspond to layer {name} in the save file." 
+ + $"However the new layer {layer.Name} expects " + + $"{symbolic_weights.Count()} weights, but the saved weights have " + + $"{weight_values.Count()} elements."); + weight_value_tuples.AddRange(zip(symbolic_weights, weight_values)); + } + + keras.backend.batch_set_value(weight_value_tuples); + } + + public static void toarrayf4(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + + } + + public static void load_weights_from_hdf5_group_by_name(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + + } + + public static void save_weights_to_hdf5_group(long f, List layers) + { + List layerName=new List(); + foreach (var layer in layers) + { + layerName.Add(layer.Name); + } + save_attributes_to_hdf5_group(f, "layer_names", layerName.ToArray()); + Hdf5.WriteAttribute(f, "backend", "tensorflow"); + Hdf5.WriteAttribute(f, "keras_version", "2.5.0"); + + foreach (var layer in layers) + { + var weights = _legacy_weights(layer); + if (weights.Count == 0) + continue; + + var weight_names = new List(); + // weight_values= keras.backend.batch_get_value(weights); + foreach (var weight in weights) + weight_names.Add(weight.Name); + + var g = Hdf5.CreateOrOpenGroup(f, Hdf5Utils.NormalizedName(layer.Name)); + save_attributes_to_hdf5_group(g, "weight_names", weight_names.ToArray()); + foreach (var (name, val) in zip(weight_names, weights)) + { + var tensor = val.AsTensor(); + if (name.IndexOf("/") > 1) + { + var crDataGroup = g; + string[] name_split = name.Split('/'); + for(int i = 0; i < name_split.Length - 1; i++) + { crDataGroup = Hdf5.CreateOrOpenGroup(crDataGroup, Hdf5Utils.NormalizedName(name_split[i])); - } - WriteDataset(crDataGroup, name_split[name_split.Length - 1], tensor); - Hdf5.CloseGroup(crDataGroup); - } - else - { - WriteDataset(g, name, tensor); - } - } - Hdf5.CloseGroup(g); - } - } - - private static void save_attributes_to_hdf5_group(long f, string name, Array data) - { - int num_chunks = 1; - - var chunked_data = Split(data, num_chunks); - int getSize = 0; - - string getType = data.Length > 0 ? 
data.GetValue(0).GetType().Name.ToLower() : "string"; - - switch (getType) - { - case "single": - getSize = sizeof(float); - break; - case "double": - getSize = sizeof(double); - break; - case "string": - getSize = -1; - break; - case "int32": - getSize = sizeof(int); - break; - case "int64": - getSize = sizeof(long); - break; - default: - getSize = -1; - break; - } - int getCount = chunked_data.Count; - - if (getSize != -1) - { - num_chunks = (int)Math.Ceiling((double)(getCount * getSize) / HDF5_OBJECT_HEADER_LIMIT); - if (num_chunks > 1) chunked_data = Split(data, num_chunks); - } - - if (num_chunks > 1) - { - foreach (var (chunk_id, chunk_data) in enumerate(chunked_data)) - WriteAttrs(f, getType, $"{name}{chunk_id}", chunk_data.ToArray()); - } - else - { - WriteAttrs(f, getType, name, data); - } - } - - private static void WriteDataset(long f, string name, Tensor data) - { - switch (data.dtype) - { - case TF_DataType.TF_FLOAT: - Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); - break; - case TF_DataType.TF_DOUBLE: - Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); - break; - case TF_DataType.TF_INT32: - Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); - break; - case TF_DataType.TF_INT64: - Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); - break; - default: - Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); - break; - } - } - - private static void WriteAttrs(long f,string typename, string name, Array data) - { - switch (typename) - { - case "single": - Hdf5.WriteAttributes(f, name, data); - break; - case "double": - Hdf5.WriteAttributes(f, name, data); - break; - case "string": - Hdf5.WriteAttributes(f, name, data); - break; - case "int32": - Hdf5.WriteAttributes(f, name, data); - break; - case "int64": - Hdf5.WriteAttributes(f, name, data); - break; - default: - Hdf5.WriteAttributes(f, name,data); - break; - } - } - - private static List> Split(Array list, int chunkSize) - { - var splitList = new List>(); - var chunkCount = (int)Math.Ceiling((double)list.Length / (double)chunkSize); - - for (int c = 0; c < chunkCount; c++) - { - var skip = c * chunkSize; - var take = skip + chunkSize; - var chunk = new List(chunkSize); - - for (int e = skip; e < take && e < list.Length; e++) - { - chunk.Add(list.GetValue(e)); - } - splitList.Add(chunk); - } - - return splitList; - } - - public static string[] load_attributes_from_hdf5_group(long group, string name) - { - var (success, attr) = Hdf5.ReadStringAttributes(group, name, "", true); - if (success) - return attr.ToArray(); - - return null; - } - - public static void load_attributes_from_hdf5_group(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - - } - - public static List _legacy_weights(ILayer layer) - { - var weights = layer.TrainableWeights.Select(x => x).ToList(); - weights.AddRange(layer.NonTrainableWeights); - return weights; - } - } -} - + } + WriteDataset(crDataGroup, name_split[name_split.Length - 1], tensor); + Hdf5.CloseGroup(crDataGroup); + } + else + { + WriteDataset(g, name, tensor); + } + } + Hdf5.CloseGroup(g); + } + } + + private static void save_attributes_to_hdf5_group(long f, string name, Array data) + { + int num_chunks = 1; + + var chunked_data = Split(data, num_chunks); + int getSize = 0; + + string getType = data.Length > 0 ? 
data.GetValue(0).GetType().Name.ToLower() : "string"; + + switch (getType) + { + case "single": + getSize = sizeof(float); + break; + case "double": + getSize = sizeof(double); + break; + case "string": + getSize = -1; + break; + case "int32": + getSize = sizeof(int); + break; + case "int64": + getSize = sizeof(long); + break; + default: + getSize = -1; + break; + } + int getCount = chunked_data.Count; + + if (getSize != -1) + { + num_chunks = (int)Math.Ceiling((double)(getCount * getSize) / HDF5_OBJECT_HEADER_LIMIT); + if (num_chunks > 1) chunked_data = Split(data, num_chunks); + } + + if (num_chunks > 1) + { + foreach (var (chunk_id, chunk_data) in enumerate(chunked_data)) + WriteAttrs(f, getType, $"{name}{chunk_id}", chunk_data.ToArray()); + } + else + { + WriteAttrs(f, getType, name, data); + } + } + + private static void WriteDataset(long f, string name, Tensor data) + { + switch (data.dtype) + { + case TF_DataType.TF_FLOAT: + Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); + break; + case TF_DataType.TF_DOUBLE: + Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); + break; + case TF_DataType.TF_INT32: + Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); + break; + case TF_DataType.TF_INT64: + Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); + break; + default: + Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); + break; + } + } + + private static void WriteAttrs(long f,string typename, string name, Array data) + { + switch (typename) + { + case "single": + Hdf5.WriteAttributes(f, name, data); + break; + case "double": + Hdf5.WriteAttributes(f, name, data); + break; + case "string": + Hdf5.WriteAttributes(f, name, data); + break; + case "int32": + Hdf5.WriteAttributes(f, name, data); + break; + case "int64": + Hdf5.WriteAttributes(f, name, data); + break; + default: + Hdf5.WriteAttributes(f, name,data); + break; + } + } + + private static List> Split(Array list, int chunkSize) + { + var splitList = new List>(); + var chunkCount = (int)Math.Ceiling((double)list.Length / (double)chunkSize); + + for (int c = 0; c < chunkCount; c++) + { + var skip = c * chunkSize; + var take = skip + chunkSize; + var chunk = new List(chunkSize); + + for (int e = skip; e < take && e < list.Length; e++) + { + chunk.Add(list.GetValue(e)); + } + splitList.Add(chunk); + } + + return splitList; + } + + public static string[] load_attributes_from_hdf5_group(long group, string name) + { + var (success, attr) = Hdf5.ReadStringAttributes(group, name, "", true); + if (success) + return attr.ToArray(); + + return null; + } + + public static void load_attributes_from_hdf5_group(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + + } + + public static List _legacy_weights(ILayer layer) + { + var weights = layer.TrainableWeights.Select(x => x).ToList(); + weights.AddRange(layer.NonTrainableWeights); + return weights; + } + } +} + From 8b17b14f30e288705552a5ca417264b35b8447bc Mon Sep 17 00:00:00 2001 From: Beacontownfc <89081023+Beacontownfc@users.noreply.github.com> Date: Sat, 22 Jul 2023 14:34:08 +0800 Subject: [PATCH 101/182] Update hdf5_format.cs --- src/TensorFlowNET.Keras/Saving/hdf5_format.cs | 708 +++++++++--------- 1 file changed, 354 insertions(+), 354 deletions(-) diff --git a/src/TensorFlowNET.Keras/Saving/hdf5_format.cs b/src/TensorFlowNET.Keras/Saving/hdf5_format.cs index c80f653f8..bab0efecf 100644 --- a/src/TensorFlowNET.Keras/Saving/hdf5_format.cs +++ 
b/src/TensorFlowNET.Keras/Saving/hdf5_format.cs @@ -1,354 +1,354 @@ -using System; -using System.Collections.Generic; -using System.Text; -using HDF.PInvoke; -using Tensorflow.NumPy; -using HDF5CSharp; -using static Tensorflow.Binding; -using static Tensorflow.KerasApi; -using System.Linq; -using System.Text.RegularExpressions; - -namespace Tensorflow.Keras.Saving -{ - public class hdf5_format - { - private static int HDF5_OBJECT_HEADER_LIMIT = 64512; - public static void load_model_from_hdf5(string filepath = "", Dictionary custom_objects = null, bool compile = false) - { - long root = Hdf5.OpenFile(filepath,true); - load_model_from_hdf5(root, custom_objects, compile); - } - public static void load_model_from_hdf5(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - //long fileId = filepath; - //try - //{ - // groupId = H5G.open(fileId, "/"); - // (bool success, string[] attrId) = Hdf5.ReadStringAttributes(groupId, "model_config", ""); - // H5G.close(groupId); - // if (success == true) { - // Console.WriteLine(attrId[0]); - // } - //} - //catch (Exception ex) - //{ - // if (filepath != -1) { - // Hdf5.CloseFile(filepath); - // } - // if (groupId != -1) { - // H5G.close(groupId); - // } - // throw new Exception(ex.ToString()); - //} - - } - public static void save_model_to_hdf5(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - - } - - /// - /// Preprocess layer weights between different Keras formats. - /// - /// - /// - /// - /// - public static List preprocess_weights_for_loading(ILayer layer, List weights, string original_keras_version = null, string original_backend = null) - { - // convert CuDNN layers - return _convert_rnn_weights(layer, weights); - } - - /// - /// Converts weights for RNN layers between native and CuDNN format. 
- /// - /// - /// - static List _convert_rnn_weights(ILayer layer, List weights) - { - var target_class = layer.GetType().Name; - return weights; - } - - public static void save_optimizer_weights_to_hdf5_group(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - - } - - public static void load_optimizer_weights_from_hdf5_group(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - - } - - public static void load_weights_from_hdf5_group(long f, List layers) - { - string original_keras_version = "2.5.0"; - string original_backend = null; - var (success, attr) = Hdf5.ReadStringAttributes(f, "keras_version", "", true); - if (success) - original_keras_version = attr.First(); - // keras version should be 2.5.0+ - var ver_major = int.Parse(original_keras_version.Split('.')[0]); - var ver_minor = int.Parse(original_keras_version.Split('.')[1]); - if (ver_major < 2 || (ver_major == 2 && ver_minor < 5)) - throw new ValueError("keras version should be 2.5.0 or later."); - - (success, attr) = Hdf5.ReadStringAttributes(f, "backend", "", true); - if (success) - original_backend = attr.First(); - - var filtered_layers = new List(); - foreach (var layer in layers) - { - var weights = _legacy_weights(layer); - if (weights.Count > 0) - filtered_layers.append(layer); - } - - string[] layer_names = load_attributes_from_hdf5_group(f, "layer_names"); - var filtered_layer_names = new List(); - foreach(var name in layer_names) - { - if (!filtered_layers.Select(x => x.Name).Contains(name)) - continue; - long g = H5G.open(f, name); - var weight_names = load_attributes_from_hdf5_group(g, "weight_names"); - if (weight_names.Count() > 0) - filtered_layer_names.Add(name); - H5G.close(g); - } - - layer_names = filtered_layer_names.ToArray(); - if (layer_names.Length != filtered_layers.Count()) - throw new ValueError("You are trying to load a weight file " + - $"containing {layer_names}" + - $" layers into a model with {filtered_layers.Count} layers."); - - var weight_value_tuples = new List<(IVariableV1, NDArray)>(); - foreach (var (k, name) in enumerate(layer_names)) - { - var weight_values = new List(); - long g = H5G.open(f, name); - var weight_names = load_attributes_from_hdf5_group(g, "weight_names"); - foreach (var i_ in weight_names) - { - (success, Array result) = Hdf5.ReadDataset(g, i_); - if (success) - weight_values.Add(np.array(result)); - } - H5G.close(g); - var layer = filtered_layers[k]; - var symbolic_weights = _legacy_weights(layer); - preprocess_weights_for_loading(layer, weight_values, original_keras_version, original_backend); - if (weight_values.Count() != symbolic_weights.Count()) - throw new ValueError($"Layer #{k} (named {layer.Name}" + - "in the current model) was found to " + - $"correspond to layer {name} in the save file." 
+ - $"However the new layer {layer.Name} expects " + - $"{symbolic_weights.Count()} weights, but the saved weights have " + - $"{weight_values.Count()} elements."); - weight_value_tuples.AddRange(zip(symbolic_weights, weight_values)); - } - - keras.backend.batch_set_value(weight_value_tuples); - } - - public static void toarrayf4(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - - } - - public static void load_weights_from_hdf5_group_by_name(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - - } - - public static void save_weights_to_hdf5_group(long f, List layers) - { - List layerName=new List(); - foreach (var layer in layers) - { - layerName.Add(layer.Name); - } - save_attributes_to_hdf5_group(f, "layer_names", layerName.ToArray()); - Hdf5.WriteAttribute(f, "backend", "tensorflow"); - Hdf5.WriteAttribute(f, "keras_version", "2.5.0"); - - foreach (var layer in layers) - { - var weights = _legacy_weights(layer); - if (weights.Count == 0) - continue; - - var weight_names = new List(); - // weight_values= keras.backend.batch_get_value(weights); - foreach (var weight in weights) - weight_names.Add(weight.Name); - - var g = Hdf5.CreateOrOpenGroup(f, Hdf5Utils.NormalizedName(layer.Name)); - save_attributes_to_hdf5_group(g, "weight_names", weight_names.ToArray()); - foreach (var (name, val) in zip(weight_names, weights)) - { - var tensor = val.AsTensor(); - if (name.IndexOf("/") > 1) - { - var crDataGroup = g; - string[] name_split = name.Split('/'); - for(int i = 0; i < name_split.Length - 1; i++) - { - crDataGroup = Hdf5.CreateOrOpenGroup(crDataGroup, Hdf5Utils.NormalizedName(name_split[i])); - } - WriteDataset(crDataGroup, name_split[name_split.Length - 1], tensor); - Hdf5.CloseGroup(crDataGroup); - } - else - { - WriteDataset(g, name, tensor); - } - } - Hdf5.CloseGroup(g); - } - } - - private static void save_attributes_to_hdf5_group(long f, string name, Array data) - { - int num_chunks = 1; - - var chunked_data = Split(data, num_chunks); - int getSize = 0; - - string getType = data.Length > 0 ? 
data.GetValue(0).GetType().Name.ToLower() : "string"; - - switch (getType) - { - case "single": - getSize = sizeof(float); - break; - case "double": - getSize = sizeof(double); - break; - case "string": - getSize = -1; - break; - case "int32": - getSize = sizeof(int); - break; - case "int64": - getSize = sizeof(long); - break; - default: - getSize = -1; - break; - } - int getCount = chunked_data.Count; - - if (getSize != -1) - { - num_chunks = (int)Math.Ceiling((double)(getCount * getSize) / HDF5_OBJECT_HEADER_LIMIT); - if (num_chunks > 1) chunked_data = Split(data, num_chunks); - } - - if (num_chunks > 1) - { - foreach (var (chunk_id, chunk_data) in enumerate(chunked_data)) - WriteAttrs(f, getType, $"{name}{chunk_id}", chunk_data.ToArray()); - } - else - { - WriteAttrs(f, getType, name, data); - } - } - - private static void WriteDataset(long f, string name, Tensor data) - { - switch (data.dtype) - { - case TF_DataType.TF_FLOAT: - Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); - break; - case TF_DataType.TF_DOUBLE: - Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); - break; - case TF_DataType.TF_INT32: - Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); - break; - case TF_DataType.TF_INT64: - Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); - break; - default: - Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); - break; - } - } - - private static void WriteAttrs(long f,string typename, string name, Array data) - { - switch (typename) - { - case "single": - Hdf5.WriteAttributes(f, name, data); - break; - case "double": - Hdf5.WriteAttributes(f, name, data); - break; - case "string": - Hdf5.WriteAttributes(f, name, data); - break; - case "int32": - Hdf5.WriteAttributes(f, name, data); - break; - case "int64": - Hdf5.WriteAttributes(f, name, data); - break; - default: - Hdf5.WriteAttributes(f, name,data); - break; - } - } - - private static List> Split(Array list, int chunkSize) - { - var splitList = new List>(); - var chunkCount = (int)Math.Ceiling((double)list.Length / (double)chunkSize); - - for (int c = 0; c < chunkCount; c++) - { - var skip = c * chunkSize; - var take = skip + chunkSize; - var chunk = new List(chunkSize); - - for (int e = skip; e < take && e < list.Length; e++) - { - chunk.Add(list.GetValue(e)); - } - splitList.Add(chunk); - } - - return splitList; - } - - public static string[] load_attributes_from_hdf5_group(long group, string name) - { - var (success, attr) = Hdf5.ReadStringAttributes(group, name, "", true); - if (success) - return attr.ToArray(); - - return null; - } - - public static void load_attributes_from_hdf5_group(long filepath = -1, Dictionary custom_objects = null, bool compile = false) - { - - } - - public static List _legacy_weights(ILayer layer) - { - var weights = layer.TrainableWeights.Select(x => x).ToList(); - weights.AddRange(layer.NonTrainableWeights); - return weights; - } - } -} - +using System; +using System.Collections.Generic; +using System.Text; +using HDF.PInvoke; +using Tensorflow.NumPy; +using HDF5CSharp; +using static Tensorflow.Binding; +using static Tensorflow.KerasApi; +using System.Linq; +using System.Text.RegularExpressions; + +namespace Tensorflow.Keras.Saving +{ + public class hdf5_format + { + private static int HDF5_OBJECT_HEADER_LIMIT = 64512; + public static void load_model_from_hdf5(string filepath = "", Dictionary custom_objects = null, bool compile = false) + { + long root = Hdf5.OpenFile(filepath,true); + 
load_model_from_hdf5(root, custom_objects, compile); + } + public static void load_model_from_hdf5(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + //long fileId = filepath; + //try + //{ + // groupId = H5G.open(fileId, "/"); + // (bool success, string[] attrId) = Hdf5.ReadStringAttributes(groupId, "model_config", ""); + // H5G.close(groupId); + // if (success == true) { + // Console.WriteLine(attrId[0]); + // } + //} + //catch (Exception ex) + //{ + // if (filepath != -1) { + // Hdf5.CloseFile(filepath); + // } + // if (groupId != -1) { + // H5G.close(groupId); + // } + // throw new Exception(ex.ToString()); + //} + + } + public static void save_model_to_hdf5(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + + } + + /// + /// Preprocess layer weights between different Keras formats. + /// + /// + /// + /// + /// + public static List preprocess_weights_for_loading(ILayer layer, List weights, string original_keras_version = null, string original_backend = null) + { + // convert CuDNN layers + return _convert_rnn_weights(layer, weights); + } + + /// + /// Converts weights for RNN layers between native and CuDNN format. + /// + /// + /// + static List _convert_rnn_weights(ILayer layer, List weights) + { + var target_class = layer.GetType().Name; + return weights; + } + + public static void save_optimizer_weights_to_hdf5_group(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + + } + + public static void load_optimizer_weights_from_hdf5_group(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + + } + + public static void load_weights_from_hdf5_group(long f, List layers) + { + string original_keras_version = "2.5.0"; + string original_backend = null; + var (success, attr) = Hdf5.ReadStringAttributes(f, "keras_version", "", true); + if (success) + original_keras_version = attr.First(); + // keras version should be 2.5.0+ + var ver_major = int.Parse(original_keras_version.Split('.')[0]); + var ver_minor = int.Parse(original_keras_version.Split('.')[1]); + if (ver_major < 2 || (ver_major == 2 && ver_minor < 5)) + throw new ValueError("keras version should be 2.5.0 or later."); + + (success, attr) = Hdf5.ReadStringAttributes(f, "backend", "", true); + if (success) + original_backend = attr.First(); + + var filtered_layers = new List(); + foreach (var layer in layers) + { + var weights = _legacy_weights(layer); + if (weights.Count > 0) + filtered_layers.append(layer); + } + + string[] layer_names = load_attributes_from_hdf5_group(f, "layer_names"); + var filtered_layer_names = new List(); + foreach(var name in layer_names) + { + if (!filtered_layers.Select(x => x.Name).Contains(name)) + continue; + long g = H5G.open(f, name); + var weight_names = load_attributes_from_hdf5_group(g, "weight_names"); + if (weight_names.Count() > 0) + filtered_layer_names.Add(name); + H5G.close(g); + } + + layer_names = filtered_layer_names.ToArray(); + if (layer_names.Length != filtered_layers.Count()) + throw new ValueError("You are trying to load a weight file " + + $"containing {layer_names}" + + $" layers into a model with {filtered_layers.Count} layers."); + + var weight_value_tuples = new List<(IVariableV1, NDArray)>(); + foreach (var (k, name) in enumerate(layer_names)) + { + var weight_values = new List(); + long g = H5G.open(f, name); + var weight_names = load_attributes_from_hdf5_group(g, "weight_names"); + foreach (var i_ in weight_names) + { + (success, Array result) = 
Hdf5.ReadDataset(g, i_); + if (success) + weight_values.Add(np.array(result)); + } + H5G.close(g); + var layer = filtered_layers[k]; + var symbolic_weights = _legacy_weights(layer); + preprocess_weights_for_loading(layer, weight_values, original_keras_version, original_backend); + if (weight_values.Count() != symbolic_weights.Count()) + throw new ValueError($"Layer #{k} (named {layer.Name}" + + "in the current model) was found to " + + $"correspond to layer {name} in the save file." + + $"However the new layer {layer.Name} expects " + + $"{symbolic_weights.Count()} weights, but the saved weights have " + + $"{weight_values.Count()} elements."); + weight_value_tuples.AddRange(zip(symbolic_weights, weight_values)); + } + + keras.backend.batch_set_value(weight_value_tuples); + } + + public static void toarrayf4(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + + } + + public static void load_weights_from_hdf5_group_by_name(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + + } + + public static void save_weights_to_hdf5_group(long f, List layers) + { + List layerName=new List(); + foreach (var layer in layers) + { + layerName.Add(layer.Name); + } + save_attributes_to_hdf5_group(f, "layer_names", layerName.ToArray()); + Hdf5.WriteAttribute(f, "backend", "tensorflow"); + Hdf5.WriteAttribute(f, "keras_version", "2.5.0"); + + foreach (var layer in layers) + { + var weights = _legacy_weights(layer); + if (weights.Count == 0) + continue; + + var weight_names = new List(); + // weight_values= keras.backend.batch_get_value(weights); + foreach (var weight in weights) + weight_names.Add(weight.Name); + + var g = Hdf5.CreateOrOpenGroup(f, Hdf5Utils.NormalizedName(layer.Name)); + save_attributes_to_hdf5_group(g, "weight_names", weight_names.ToArray()); + foreach (var (name, val) in zip(weight_names, weights)) + { + var tensor = val.AsTensor(); + if (name.IndexOf("/") > 1) + { + var crDataGroup = g; + string[] name_split = name.Split('/'); + for(int i = 0; i < name_split.Length - 1; i++) + { + crDataGroup = Hdf5.CreateOrOpenGroup(crDataGroup, Hdf5Utils.NormalizedName(name_split[i])); + } + WriteDataset(crDataGroup, name_split[name_split.Length - 1], tensor); + Hdf5.CloseGroup(crDataGroup); + } + else + { + WriteDataset(g, name, tensor); + } + } + Hdf5.CloseGroup(g); + } + } + + private static void save_attributes_to_hdf5_group(long f, string name, Array data) + { + int num_chunks = 1; + + var chunked_data = Split(data, num_chunks); + int getSize = 0; + + string getType = data.Length > 0 ? 
data.GetValue(0).GetType().Name.ToLower() : "string"; + + switch (getType) + { + case "single": + getSize = sizeof(float); + break; + case "double": + getSize = sizeof(double); + break; + case "string": + getSize = -1; + break; + case "int32": + getSize = sizeof(int); + break; + case "int64": + getSize = sizeof(long); + break; + default: + getSize = -1; + break; + } + int getCount = chunked_data.Count; + + if (getSize != -1) + { + num_chunks = (int)Math.Ceiling((double)(getCount * getSize) / HDF5_OBJECT_HEADER_LIMIT); + if (num_chunks > 1) chunked_data = Split(data, num_chunks); + } + + if (num_chunks > 1) + { + foreach (var (chunk_id, chunk_data) in enumerate(chunked_data)) + WriteAttrs(f, getType, $"{name}{chunk_id}", chunk_data.ToArray()); + } + else + { + WriteAttrs(f, getType, name, data); + } + } + + private static void WriteDataset(long f, string name, Tensor data) + { + switch (data.dtype) + { + case TF_DataType.TF_FLOAT: + Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); + break; + case TF_DataType.TF_DOUBLE: + Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); + break; + case TF_DataType.TF_INT32: + Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); + break; + case TF_DataType.TF_INT64: + Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); + break; + default: + Hdf5.WriteDatasetFromArray(f, name, data.numpy().ToMultiDimArray()); + break; + } + } + + private static void WriteAttrs(long f,string typename, string name, Array data) + { + switch (typename) + { + case "single": + Hdf5.WriteAttributes(f, name, data); + break; + case "double": + Hdf5.WriteAttributes(f, name, data); + break; + case "string": + Hdf5.WriteAttributes(f, name, data); + break; + case "int32": + Hdf5.WriteAttributes(f, name, data); + break; + case "int64": + Hdf5.WriteAttributes(f, name, data); + break; + default: + Hdf5.WriteAttributes(f, name,data); + break; + } + } + + private static List> Split(Array list, int chunkSize) + { + var splitList = new List>(); + var chunkCount = (int)Math.Ceiling((double)list.Length / (double)chunkSize); + + for (int c = 0; c < chunkCount; c++) + { + var skip = c * chunkSize; + var take = skip + chunkSize; + var chunk = new List(chunkSize); + + for (int e = skip; e < take && e < list.Length; e++) + { + chunk.Add(list.GetValue(e)); + } + splitList.Add(chunk); + } + + return splitList; + } + + public static string[] load_attributes_from_hdf5_group(long group, string name) + { + var (success, attr) = Hdf5.ReadStringAttributes(group, name, "", true); + if (success) + return attr.ToArray(); + + return null; + } + + public static void load_attributes_from_hdf5_group(long filepath = -1, Dictionary custom_objects = null, bool compile = false) + { + + } + + public static List _legacy_weights(ILayer layer) + { + var weights = layer.TrainableWeights.Select(x => x).ToList(); + weights.AddRange(layer.NonTrainableWeights); + return weights; + } + } +} + From 482899eab734f1b6f3a39ef52a4f9ae28e332ed5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CWanglongzhi2001=E2=80=9D?= <“583087864@qq.com”> Date: Sat, 22 Jul 2023 15:03:50 +0800 Subject: [PATCH 102/182] fix: revise np.amin, np.amax and add np.argmin --- .../NumPy/NumPy.Sorting.Searching.Counting.cs | 4 ++++ src/TensorFlowNET.Core/NumPy/NumPy.Statistics.cs | 4 ++-- src/TensorFlowNET.Core/Operations/math_ops.cs | 3 +++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Core/NumPy/NumPy.Sorting.Searching.Counting.cs 
b/src/TensorFlowNET.Core/NumPy/NumPy.Sorting.Searching.Counting.cs index 5182d5726..4cad36e0b 100644 --- a/src/TensorFlowNET.Core/NumPy/NumPy.Sorting.Searching.Counting.cs +++ b/src/TensorFlowNET.Core/NumPy/NumPy.Sorting.Searching.Counting.cs @@ -13,6 +13,10 @@ public partial class np public static NDArray argmax(NDArray a, Axis? axis = null) => new NDArray(math_ops.argmax(a, axis ?? 0)); + [AutoNumPy] + public static NDArray argmin(NDArray a, Axis? axis = null) + => new NDArray(math_ops.argmin(a, axis ?? 0)); + [AutoNumPy] public static NDArray argsort(NDArray a, Axis? axis = null) => new NDArray(sort_ops.argsort(a, axis: axis ?? -1)); diff --git a/src/TensorFlowNET.Core/NumPy/NumPy.Statistics.cs b/src/TensorFlowNET.Core/NumPy/NumPy.Statistics.cs index 5d86b1b39..bce16ec9f 100644 --- a/src/TensorFlowNET.Core/NumPy/NumPy.Statistics.cs +++ b/src/TensorFlowNET.Core/NumPy/NumPy.Statistics.cs @@ -10,10 +10,10 @@ namespace Tensorflow.NumPy public partial class np { [AutoNumPy] - public static NDArray amin(NDArray x, int axis = 0) => new NDArray(tf.arg_min(x, axis)); + public static NDArray amin(NDArray x, int axis = 0) => new NDArray(tf.min(x, axis)); [AutoNumPy] - public static NDArray amax(NDArray x, int axis = 0) => new NDArray(tf.math.argmax(x, axis)); + public static NDArray amax(NDArray x, int axis = 0) => new NDArray(tf.max(x, axis)); [AutoNumPy] public static NDArray average(NDArray a, int axis = -1, NDArray? weights = null, bool returned = false) diff --git a/src/TensorFlowNET.Core/Operations/math_ops.cs b/src/TensorFlowNET.Core/Operations/math_ops.cs index 092137bf2..e77df702f 100644 --- a/src/TensorFlowNET.Core/Operations/math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/math_ops.cs @@ -77,6 +77,9 @@ public static Tensor add_n(Tensor[] inputs, string name = null) public static Tensor argmax(Tensor input, Axis dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null) => gen_math_ops.arg_max(input, dimension, output_type: output_type, name: name); + public static Tensor argmin(Tensor input, Axis dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null) + => gen_math_ops.arg_min(input, dimension, output_type: output_type, name: name); + public static Tensor round(Tensor x, string name = null) { x = ops.convert_to_tensor(x, name: "x"); From b0ce73caff995d8b5b8080dd41812af4c48908e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CWanglongzhi2001=E2=80=9D?= <“583087864@qq.com”> Date: Mon, 24 Jul 2023 23:38:58 +0800 Subject: [PATCH 103/182] feat: add adjust_contrast, adjust_hue, combined_non_max_suppression, crop_and_resize image oprs --- src/TensorFlowNET.Core/APIs/tf.image.cs | 131 +++++++- .../Operations/gen_image_ops.cs | 298 +++++++++++++++++- .../TensorFlowNET.Graph.UnitTest/ImageTest.cs | 65 +++- 3 files changed, 479 insertions(+), 15 deletions(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.image.cs b/src/TensorFlowNET.Core/APIs/tf.image.cs index 9230b50dc..ac9cbc60d 100644 --- a/src/TensorFlowNET.Core/APIs/tf.image.cs +++ b/src/TensorFlowNET.Core/APIs/tf.image.cs @@ -14,6 +14,10 @@ You may obtain a copy of the License at limitations under the License. 
******************************************************************************/ +using OneOf.Types; +using System; +using System.Buffers.Text; +using Tensorflow.Contexts; using static Tensorflow.Binding; namespace Tensorflow @@ -162,17 +166,108 @@ public Tensor ssim_multiscale(Tensor img1, Tensor img2, float max_val, float[] p public Tensor sobel_edges(Tensor image) => image_ops_impl.sobel_edges(image); - public Tensor decode_jpeg(Tensor contents, - int channels = 0, - int ratio = 1, - bool fancy_upscaling = true, - bool try_recover_truncated = false, - int acceptable_fraction = 1, - string dct_method = "", - string name = null) - => gen_image_ops.decode_jpeg(contents, channels: channels, ratio: ratio, - fancy_upscaling: fancy_upscaling, try_recover_truncated: try_recover_truncated, - acceptable_fraction: acceptable_fraction, dct_method: dct_method); + /// + /// Adjust contrast of RGB or grayscale images. + /// + /// Images to adjust. At least 3-D. + /// + /// A float multiplier for adjusting contrast. + /// The contrast-adjusted image or images. + public Tensor adjust_contrast(Tensor images, float contrast_factor, string name = null) + => gen_image_ops.adjust_contrastv2(images, contrast_factor, name); + + /// + /// Adjust hue of RGB images. + /// + /// RGB image or images. The size of the last dimension must be 3. + /// float. How much to add to the hue channel. + /// A name for this operation (optional). + /// Adjusted image(s), same shape and DType as `image`. + /// if `delta` is not in the interval of `[-1, 1]`. + public Tensor adjust_hue(Tensor images, float delta, string name = null) + { + if (tf.Context.executing_eagerly()) + { + if (delta < -1f || delta > 1f) + throw new ValueError("delta must be in the interval [-1, 1]"); + } + return gen_image_ops.adjust_hue(images, delta, name: name); + } + + /// + /// Adjust saturation of RGB images. + /// + /// RGB image or images. The size of the last dimension must be 3. + /// float. Factor to multiply the saturation by. + /// A name for this operation (optional). + /// Adjusted image(s), same shape and DType as `image`. + public Tensor adjust_saturation(Tensor image, float saturation_factor, string name = null) + => gen_image_ops.adjust_saturation(image, saturation_factor, name); + + /// + /// Greedily selects a subset of bounding boxes in descending order of score. + /// + /// + /// A 4-D float `Tensor` of shape `[batch_size, num_boxes, q, 4]`. If `q` + /// is 1 then same boxes are used for all classes otherwise, if `q` is equal + /// to number of classes, class-specific boxes are used. + /// + /// + /// A 3-D float `Tensor` of shape `[batch_size, num_boxes, num_classes]` + /// representing a single score corresponding to each box(each row of boxes). + /// + /// + /// A scalar integer `Tensor` representing the + /// maximum number of boxes to be selected by non-max suppression per class + /// + /// + /// A int32 scalar representing maximum number of boxes retained + /// over all classes.Note that setting this value to a large number may + /// result in OOM error depending on the system workload. + /// + /// + /// A float representing the threshold for deciding whether boxes + /// overlap too much with respect to IOU. + /// + /// + /// A float representing the threshold for deciding when to + /// remove boxes based on score. + /// + /// + /// If false, the output nmsed boxes, scores and classes are + /// padded/clipped to `max_total_size`. 
If true, the output nmsed boxes, scores and classes are padded to be of length `max_size_per_class`*`num_classes`, + /// unless it exceeds `max_total_size` in which case it is clipped to `max_total_size`. Defaults to false. + /// + /// + /// If true, the coordinates of output nmsed boxes will be clipped + /// to[0, 1]. If false, output the box coordinates as it is. Defaults to true. + /// + /// + /// 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor containing the non-max suppressed boxes. + /// 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing the scores for the boxes. + /// 'nmsed_classes': A [batch_size, max_detections] float32 tensor containing the class for boxes. + /// 'valid_detections': A [batch_size] int32 tensor indicating the number of + /// valid detections per batch item. Only the top valid_detections[i] entries + /// in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the + /// entries are zero paddings. + /// + public (Tensor, Tensor, Tensor, Tensor) combined_non_max_suppression( + Tensor boxes, + Tensor scores, + int max_output_size_per_class, + int max_total_size, + float iou_threshold, + float score_threshold, + bool pad_per_class = false, + bool clip_boxes = true) + { + var iou_threshold_t = ops.convert_to_tensor(iou_threshold, TF_DataType.TF_FLOAT, name: "iou_threshold"); + var score_threshold_t = ops.convert_to_tensor(score_threshold, TF_DataType.TF_FLOAT, name: "score_threshold"); + var max_total_size_t = ops.convert_to_tensor(max_total_size); + var max_output_size_per_class_t = ops.convert_to_tensor(max_output_size_per_class); + return gen_image_ops.combined_non_max_suppression(boxes, scores, max_output_size_per_class_t, max_total_size_t, + iou_threshold_t, score_threshold_t, pad_per_class, clip_boxes); + } /// /// Extracts crops from the input image tensor and resizes them using bilinear sampling or nearest neighbor sampling (possibly with aspect ratio change) to a common output size specified by crop_size. This is more general than the crop_to_bounding_box op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change. @@ -187,7 +282,19 @@ public Tensor decode_jpeg(Tensor contents, /// A name for the operation (optional). /// A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth]. 
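 /// A minimal usage sketch (shapes and values below are illustrative assumptions, not part of this patch):
 ///   var image = tf.random.uniform((1, 256, 256, 3));                        // [batch, H, W, C]
 ///   var boxes = tf.constant(new float[,] { { 0.1f, 0.1f, 0.9f, 0.9f } });   // normalized [y1, x1, y2, x2]
 ///   var box_ind = tf.constant(new int[] { 0 });                             // batch index for each box
 ///   var crop_size = tf.constant(new int[] { 24, 24 });
 ///   var crops = tf.image.crop_and_resize(image, boxes, box_ind, crop_size); // shape: [1, 24, 24, 3]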
public Tensor crop_and_resize(Tensor image, Tensor boxes, Tensor box_ind, Tensor crop_size, string method = "bilinear", float extrapolation_value = 0f, string name = null) => - image_ops_impl.crop_and_resize(image, boxes, box_ind, crop_size, method, extrapolation_value, name); + gen_image_ops.crop_and_resize(image, boxes, box_ind, crop_size, method, extrapolation_value, name); + + public Tensor decode_jpeg(Tensor contents, + int channels = 0, + int ratio = 1, + bool fancy_upscaling = true, + bool try_recover_truncated = false, + int acceptable_fraction = 1, + string dct_method = "", + string name = null) + => gen_image_ops.decode_jpeg(contents, channels: channels, ratio: ratio, + fancy_upscaling: fancy_upscaling, try_recover_truncated: try_recover_truncated, + acceptable_fraction: acceptable_fraction, dct_method: dct_method); public Tensor extract_glimpse(Tensor input, Tensor size, Tensor offsets, bool centered = true, bool normalized = true, bool uniform_noise = true, string name = null) diff --git a/src/TensorFlowNET.Core/Operations/gen_image_ops.cs b/src/TensorFlowNET.Core/Operations/gen_image_ops.cs index 9240b5905..cbe661ae5 100644 --- a/src/TensorFlowNET.Core/Operations/gen_image_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_image_ops.cs @@ -16,18 +16,312 @@ limitations under the License. using System; using System.Linq; +using Tensorflow.Eager; using static Tensorflow.Binding; +using Tensorflow.Exceptions; +using Tensorflow.Contexts; +using System.Xml.Linq; +using Google.Protobuf; namespace Tensorflow { public class gen_image_ops { + public static Tensor adjust_contrastv2(Tensor images, Tensor contrast_factor, string name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AdjustContrastv2", name) { + args = new object[] { images, contrast_factor }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return adjust_contrastv2_eager_fallback(images, contrast_factor, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["images"] = images; + keywords["contrast_factor"] = contrast_factor; + var _op = tf.OpDefLib._apply_op_helper("AdjustContrastv2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("AdjustContrastv2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + public static Tensor adjust_contrastv2(Tensor image, float contrast_factor, string name = null) + { + return adjust_contrastv2(image, tf.convert_to_tensor(contrast_factor), name: name); + } + + public static Tensor adjust_contrastv2_eager_fallback(Tensor images, Tensor contrast_factor, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { images, contrast_factor}; + object[] _attrs = new object[] { "T", images.dtype }; + var _result = _execute.execute("AdjustContrastv2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AdjustContrastv2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + + public static Tensor adjust_hue(Tensor images, Tensor delta, string name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = 
tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AdjustHue", name) { + args = new object[] { images, delta }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return adjust_hue_eager_fallback(images, delta, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["images"] = images; + keywords["delta"] = delta; + var _op = tf.OpDefLib._apply_op_helper("AdjustHue", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("AdjustHue", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor adjust_hue(Tensor images, float delta, string name = null) + => adjust_hue(images, delta, name: name); + + public static Tensor adjust_hue_eager_fallback(Tensor images, Tensor delta, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { images, delta}; + object[] _attrs = new object[] { "T", images.dtype }; + var _result = _execute.execute("AdjustHue", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AdjustHue", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + + public static Tensor adjust_saturation(Tensor images, Tensor scale, string name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AdjustSaturation", name) + { + args = new object[] { images, scale }, + attrs = new Dictionary() { } + }); + return _fast_path_result[0]; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return adjust_hue_eager_fallback(images, scale, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["images"] = images; + keywords["scale"] = scale; + var _op = tf.OpDefLib._apply_op_helper("AdjustSaturation", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("AdjustSaturation", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor adjust_saturation(Tensor images, float scale, string name = null) + => adjust_saturation(images, ops.convert_to_tensor(scale), name: name); + + public static Tensor adjust_saturation_eager_fallback(Tensor images, Tensor scale, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { images, scale }; + object[] _attrs = new object[] { "T", images.dtype }; + var _result = _execute.execute("AdjustSaturation", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AdjustSaturation", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static (Tensor, Tensor, Tensor, Tensor) combined_non_max_suppression(Tensor boxes, Tensor scores, Tensor max_output_size_per_class, Tensor max_total_size, - Tensor iou_threshold, Tensor score_threshold, bool pad_per_class, bool clip_boxes) + Tensor iou_threshold, Tensor score_threshold, bool pad_per_class = false, bool clip_boxes = true, string name = null) { - throw new NotImplementedException("combined_non_max_suppression"); + var _ctx = tf.Context; + if 
(_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CombinedNonMaxSuppression", name){ + args = new object[] { + boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold, + "pad_per_class", pad_per_class, "clip_boxes", clip_boxes}, + attrs = new Dictionary() { }}); + return (_fast_path_result[0], _fast_path_result[1], _fast_path_result[2], _fast_path_result[3]); + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return combined_non_max_suppression_eager_fallback( + boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, + score_threshold, pad_per_class, clip_boxes, name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["boxes"] = boxes; + keywords["scores"] = scores; + keywords["max_output_size_per_class"] = max_output_size_per_class; + keywords["max_total_size"] = max_total_size; + keywords["iou_threshold"] = iou_threshold; + keywords["score_threshold"] = score_threshold; + keywords["pad_per_class"] = pad_per_class; + keywords["clip_boxes"] = clip_boxes; + + var _op = tf.OpDefLib._apply_op_helper("CombinedNonMaxSuppression", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "pad_per_class", _op._get_attr_type("pad_per_class") ,"clip_boxes", _op._get_attr_type("clip_boxes")}; + _execute.record_gradient("CombinedNonMaxSuppression", _op.inputs, _attrs, _result); + } + return (_result[0], _result[1], _result[2], _result[3]); } + public static (Tensor, Tensor, Tensor, Tensor) combined_non_max_suppression_eager_fallback(Tensor boxes, Tensor scores, Tensor max_output_size_per_class, Tensor max_total_size, + Tensor iou_threshold, Tensor score_threshold, bool pad_per_class, bool clip_boxes, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold }; + object[] _attrs = new object[] { "pad_per_class", pad_per_class, "clip_boxes", clip_boxes }; + var _result = _execute.execute("CombinedNonMaxSuppression", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("CombinedNonMaxSuppression", _inputs_flat, _attrs, _result); + } + return (_result[0], _result[1], _result[2], _result[3]); + } + + public static Tensor crop_and_resize(Tensor image, Tensor boxes, Tensor box_ind, Tensor crop_size, string method = "bilinear", float extrapolation_value = 0f, string name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CropAndResize", name) { + args = new object[] { + image, boxes, box_ind, crop_size, "method", method, "extrapolation_value", extrapolation_value }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (NotOkStatusException ex) + { + throw ex; + } + catch (Exception) + { + } + try + { + return crop_and_resize_eager_fallback( + image, boxes, box_ind, crop_size, method: method, extrapolation_value: extrapolation_value, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["image"] = image; + keywords["boxes"] = boxes; + keywords["box_ind"] = box_ind; + keywords["crop_size"] = crop_size; + keywords["method"] = method; + keywords["extrapolation_value"] = 
extrapolation_value; + var _op = tf.OpDefLib._apply_op_helper("CropAndResize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") ,"method", _op._get_attr_type("method") , + "extrapolation_value", _op.get_attr("extrapolation_value")}; + _execute.record_gradient("CropAndResize", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor crop_and_resize_eager_fallback(Tensor image, Tensor boxes, Tensor box_ind, Tensor crop_size, string method, float extrapolation_value, string name, Context ctx) + { + if (method is null) + method = "bilinear"; + //var method_cpmpat = ByteString.CopyFromUtf8(method ?? string.Empty); + //var extrapolation_value_float = (float)extrapolation_value; + + Tensor[] _inputs_flat = new Tensor[] { image, boxes, box_ind, crop_size, tf.convert_to_tensor(method), tf.convert_to_tensor(extrapolation_value) }; + object[] _attrs = new object[] { "T", image.dtype }; + var _result = _execute.execute("CropAndResize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("CropAndResize", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + + public static Tensor convert_image_dtype(Tensor image, TF_DataType dtype, bool saturate = false, string name = null) { if (dtype == image.dtype) diff --git a/test/TensorFlowNET.Graph.UnitTest/ImageTest.cs b/test/TensorFlowNET.Graph.UnitTest/ImageTest.cs index c42445cf1..151ea834b 100644 --- a/test/TensorFlowNET.Graph.UnitTest/ImageTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/ImageTest.cs @@ -3,6 +3,7 @@ using System.Linq; using Tensorflow; using static Tensorflow.Binding; +using System; namespace TensorFlowNET.UnitTest { @@ -22,13 +23,75 @@ public void Initialize() contents = tf.io.read_file(imgPath); } + [TestMethod] + public void adjust_contrast() + { + var input = np.array(0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f); + var image = tf.reshape(input, new int[] { 3, 3, 1 }); + var img = tf.image.adjust_contrast(image, 2.0f); + var res = np.array(-4f, -2f, 0f, 2f, 4f, 6f, 8f, 10f, 12f).reshape((3,3,1)); + Assert.AreEqual(img.numpy(), res); + } + + [Ignore] + [TestMethod] + public void adjust_hue() + { + var image = tf.constant(new int[] {1,2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18}); + image = tf.reshape(image, new int[] { 3, 2, 3 }); + var adjusted_image = tf.image.adjust_hue(image, 0.2f); + var res = tf.constant(new int[] {2,1,3, 4, 5, 6,8,7,9,11,10,12,14,13,15,17,16,18}); + res = tf.reshape(res,(3,2,3)); + Assert.AreEqual(adjusted_image, res); + } + + [TestMethod] + public void combined_non_max_suppression() + { + var boxesX = tf.constant(new float[,] { { 200, 100, 150, 100 }, { 220, 120, 150, 100 }, { 190, 110, 150, 100 },{ 210, 112, 150, 100 } }); + var boxes1 = tf.reshape(boxesX, (1, 4, 1, 4)); + var scoresX = tf.constant(new float[,] { { 0.2f, 0.7f, 0.1f },{ 0.1f, 0.8f, 0.1f },{ 0.3f, 0.6f, 0.1f },{ 0.05f, 0.9f, 0.05f } }); + var scores1 = tf.reshape(scoresX, (1, 4, 3)); + var (boxes, scores, classes, valid_detections) = tf.image.combined_non_max_suppression(boxes1, scores1, 10, 10, 0.5f, 0.2f, clip_boxes:false); + + var boxes_gt = tf.constant(new float[,] { { 210f, 112f, 150f, 100f }, { 200f, 100f, 150f, 100f }, { 190f, 110f, 150f, 100f }, + { 0f, 0f, 0f, 0f},{ 0f, 0f, 0f, 0f},{ 0f, 0f, 0f, 0f},{ 0f, 0f, 0f , 0f},{ 0f, 0f, 0f, 0f},{ 0f , 0f, 0f, 0f},{ 0f, 0f, 0f, 0f} }); + boxes_gt = tf.reshape(boxes_gt,(1, 10, 4)); + 
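+ // Expected behaviour: detections come back sorted by descending score (0.9, 0.7, 0.3) and the
+ // remaining slots are zero-padded up to max_total_size (10), which the *_gt tensors encode.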
Assert.AreEqual(boxes.numpy(), boxes_gt.numpy()); + var scores_gt = tf.constant(new float[,] { { 0.9f, 0.7f, 0.3f, 0f, 0f, 0f, 0f, 0f, 0f, 0f } }); + scores_gt = tf.reshape(scores_gt, (1, 10)); + Assert.AreEqual(scores.numpy(), scores_gt.numpy()); + var classes_gt = tf.constant(new float[,] { { 1f, 1f, 0f, 0f, 0f, 0f, 0f, 0f, 0f, 0f } }); + classes_gt = tf.reshape(classes_gt, (1, 10)); + Assert.AreEqual(classes.numpy(), classes_gt.numpy()); + var valid_detections_gt = tf.constant(new int[,] { { 3 } }); + valid_detections_gt = tf.reshape(valid_detections_gt, (1)); + Assert.AreEqual(valid_detections.numpy(), valid_detections_gt.numpy()); + } + + [TestMethod] + public void crop_and_resize() + { + int BATCH_SIZE = 1; + int NUM_BOXES = 5; + int IMAGE_HEIGHT = 256; + int IMAGE_WIDTH = 256; + int CHANNELS = 3; + var crop_size = tf.constant(new int[] { 24, 24 }); + var image = tf.random.uniform((BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS)); + var boxes = tf.random.uniform((NUM_BOXES, 4)); + var box_ind = tf.random.uniform((NUM_BOXES), minval: 0, maxval: BATCH_SIZE, dtype: TF_DataType.TF_INT32); + var output = tf.image.crop_and_resize(image, boxes, box_ind, crop_size); + Assert.AreEqual((5,24,24,3), output.shape); + } + [TestMethod] public void decode_image() { var img = tf.image.decode_image(contents); Assert.AreEqual(img.name, "decode_image/DecodeImage:0"); } - + [TestMethod] public void resize_image() { From 3273cbc7f2e14eb030dfc9967ce5bf550186a93e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CWanglongzhi2001=E2=80=9D?= <“583087864@qq.com”> Date: Tue, 25 Jul 2023 00:09:50 +0800 Subject: [PATCH 104/182] fix: fix ci error --- .../TensorFlowNET.Graph.UnitTest/ImageTest.cs | 31 +++++++++++++------ 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/test/TensorFlowNET.Graph.UnitTest/ImageTest.cs b/test/TensorFlowNET.Graph.UnitTest/ImageTest.cs index 151ea834b..d671b6096 100644 --- a/test/TensorFlowNET.Graph.UnitTest/ImageTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/ImageTest.cs @@ -28,9 +28,14 @@ public void adjust_contrast() { var input = np.array(0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f); var image = tf.reshape(input, new int[] { 3, 3, 1 }); - var img = tf.image.adjust_contrast(image, 2.0f); + + var init = tf.global_variables_initializer(); + var sess = tf.Session(); + sess.run(init); + var adjust_contrast = tf.image.adjust_contrast(image, 2.0f); + var result = sess.run(adjust_contrast); var res = np.array(-4f, -2f, 0f, 2f, 4f, 6f, 8f, 10f, 12f).reshape((3,3,1)); - Assert.AreEqual(img.numpy(), res); + Assert.AreEqual(result.numpy(), res); } [Ignore] @@ -48,25 +53,31 @@ public void adjust_hue() [TestMethod] public void combined_non_max_suppression() { - var boxesX = tf.constant(new float[,] { { 200, 100, 150, 100 }, { 220, 120, 150, 100 }, { 190, 110, 150, 100 },{ 210, 112, 150, 100 } }); + var boxesX = tf.constant(new float[,] { { 200, 100, 150, 100 }, { 220, 120, 150, 100 }, { 190, 110, 150, 100 }, { 210, 112, 150, 100 } }); var boxes1 = tf.reshape(boxesX, (1, 4, 1, 4)); - var scoresX = tf.constant(new float[,] { { 0.2f, 0.7f, 0.1f },{ 0.1f, 0.8f, 0.1f },{ 0.3f, 0.6f, 0.1f },{ 0.05f, 0.9f, 0.05f } }); + var scoresX = tf.constant(new float[,] { { 0.2f, 0.7f, 0.1f }, { 0.1f, 0.8f, 0.1f }, { 0.3f, 0.6f, 0.1f }, { 0.05f, 0.9f, 0.05f } }); var scores1 = tf.reshape(scoresX, (1, 4, 3)); - var (boxes, scores, classes, valid_detections) = tf.image.combined_non_max_suppression(boxes1, scores1, 10, 10, 0.5f, 0.2f, clip_boxes:false); + + var init = tf.global_variables_initializer(); + var 
sess = tf.Session(); + sess.run(init); + + var (boxes, scores, classes, valid_detections) = tf.image.combined_non_max_suppression(boxes1, scores1, 10, 10, 0.5f, 0.2f, clip_boxes: false); + var result = sess.run((boxes, scores, classes, valid_detections)); var boxes_gt = tf.constant(new float[,] { { 210f, 112f, 150f, 100f }, { 200f, 100f, 150f, 100f }, { 190f, 110f, 150f, 100f }, { 0f, 0f, 0f, 0f},{ 0f, 0f, 0f, 0f},{ 0f, 0f, 0f, 0f},{ 0f, 0f, 0f , 0f},{ 0f, 0f, 0f, 0f},{ 0f , 0f, 0f, 0f},{ 0f, 0f, 0f, 0f} }); - boxes_gt = tf.reshape(boxes_gt,(1, 10, 4)); - Assert.AreEqual(boxes.numpy(), boxes_gt.numpy()); + boxes_gt = tf.reshape(boxes_gt, (1, 10, 4)); + Assert.AreEqual(result.Item1.numpy(), boxes_gt.numpy()); var scores_gt = tf.constant(new float[,] { { 0.9f, 0.7f, 0.3f, 0f, 0f, 0f, 0f, 0f, 0f, 0f } }); scores_gt = tf.reshape(scores_gt, (1, 10)); - Assert.AreEqual(scores.numpy(), scores_gt.numpy()); + Assert.AreEqual(result.Item2.numpy(), scores_gt.numpy()); var classes_gt = tf.constant(new float[,] { { 1f, 1f, 0f, 0f, 0f, 0f, 0f, 0f, 0f, 0f } }); classes_gt = tf.reshape(classes_gt, (1, 10)); - Assert.AreEqual(classes.numpy(), classes_gt.numpy()); + Assert.AreEqual(result.Item3.numpy(), classes_gt.numpy()); var valid_detections_gt = tf.constant(new int[,] { { 3 } }); valid_detections_gt = tf.reshape(valid_detections_gt, (1)); - Assert.AreEqual(valid_detections.numpy(), valid_detections_gt.numpy()); + Assert.AreEqual(result.Item4.numpy(), valid_detections_gt.numpy()); } [TestMethod] From 005476cbcd71f4bcdfeda8f41461ea20dbdc09df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CWanglongzhi2001=E2=80=9D?= <“583087864@qq.com”> Date: Wed, 26 Jul 2023 15:31:06 +0800 Subject: [PATCH 105/182] fix: add the gradient of the tf.gradient opr --- src/TensorFlowNET.Core/Gradients/array_grad.cs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/TensorFlowNET.Core/Gradients/array_grad.cs b/src/TensorFlowNET.Core/Gradients/array_grad.cs index 1b6bc95ee..4b7027992 100644 --- a/src/TensorFlowNET.Core/Gradients/array_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/array_grad.cs @@ -373,5 +373,13 @@ public static Tensor[] _TransposeGrad(Operation op, Tensor[] grads) var p = op.inputs[1]; return new Tensor[] { array_ops.transpose(grads[0], array_ops.invert_permutation(p)), null }; } + + [RegisterGradient("ReverseV2")] + public static Tensor[] _ReverseV2Grad(Operation op, Tensor[] grads) + { + var grad = grads[0]; + var axis = op.inputs[1]; + return new Tensor[] { array_ops.reverse(grad, axis), null }; + } } } From f3b3d8be65f8d037dd456d6380bb93d2e888b53c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CWanglongzhi2001=E2=80=9D?= <“583087864@qq.com”> Date: Fri, 28 Jul 2023 12:42:11 +0800 Subject: [PATCH 106/182] fix: add the momentum parameter's implemention of SGD --- src/TensorFlowNET.Core/Keras/IOptimizerApi.cs | 2 +- .../Training/gen_training_ops.cs | 4 ++++ .../Optimizers/OptimizerApi.cs | 4 ++-- src/TensorFlowNET.Keras/Optimizers/SGD.cs | 19 ++++++++++++++++++- 4 files changed, 25 insertions(+), 4 deletions(-) diff --git a/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs b/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs index d0d3a74f1..19e3a7b8c 100644 --- a/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs +++ b/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs @@ -63,6 +63,6 @@ IOptimizer RMSprop(float learning_rate = 0.001f, bool centered = false, string name = "RMSprop"); - IOptimizer SGD(float learning_rate); + IOptimizer SGD(float learning_rate, float momentum); } } diff --git 
a/src/TensorFlowNET.Core/Training/gen_training_ops.cs b/src/TensorFlowNET.Core/Training/gen_training_ops.cs index abe85a141..df7dd9e65 100644 --- a/src/TensorFlowNET.Core/Training/gen_training_ops.cs +++ b/src/TensorFlowNET.Core/Training/gen_training_ops.cs @@ -51,5 +51,9 @@ public static Tensor apply_gradient_descent(IVariableV1 var, Tensor alpha, Tenso public static Tensor resource_apply_gradient_descent(Tensor var, Tensor alpha, Tensor delta, bool use_locking = false, string name = null) => tf.Context.ExecuteOp("ResourceApplyGradientDescent", name, new ExecuteOpArgs(var, alpha, delta).SetAttributes(new { use_locking })); + + public static Tensor resource_apply_keras_momentum(Tensor var, Tensor accum, Tensor lr, Tensor grad, Tensor momentum, bool use_locking = false, bool use_nesterov = false, string name = null) + => tf.Context.ExecuteOp("ResourceApplyKerasMomentum", name, + new ExecuteOpArgs(var, accum, lr, grad, momentum).SetAttributes(new { use_locking, use_nesterov })); } } diff --git a/src/TensorFlowNET.Keras/Optimizers/OptimizerApi.cs b/src/TensorFlowNET.Keras/Optimizers/OptimizerApi.cs index 280694268..affd43a4f 100644 --- a/src/TensorFlowNET.Keras/Optimizers/OptimizerApi.cs +++ b/src/TensorFlowNET.Keras/Optimizers/OptimizerApi.cs @@ -71,7 +71,7 @@ public IOptimizer RMSprop(float learning_rate = 0.001f, Name = name }); - public IOptimizer SGD(float learning_rate) - => new SGD(learning_rate); + public IOptimizer SGD(float learning_rate, float momentum) + => new SGD(learning_rate, momentum); } } diff --git a/src/TensorFlowNET.Keras/Optimizers/SGD.cs b/src/TensorFlowNET.Keras/Optimizers/SGD.cs index f97f4b15f..1d9ceb810 100644 --- a/src/TensorFlowNET.Keras/Optimizers/SGD.cs +++ b/src/TensorFlowNET.Keras/Optimizers/SGD.cs @@ -22,6 +22,8 @@ public SGD(float learning_rate, _set_hyper("decay", decay); _momentum = momentum > 0; + if (momentum < 0 || momentum > 1) + throw new ValueError($"momentum must be a number between 0 and 1, got {momentum}."); _set_hyper("momentum", momentum); @@ -30,6 +32,13 @@ public SGD(float learning_rate, #pragma warning restore CS1717 // Assignment made to same variable } + protected override void _create_slots(IVariableV1[] var_list) + { + if (_momentum) + foreach (var var in var_list) + add_slot(var, "momentum"); + } + protected override void _prepare_local(DeviceDType device_dtype, Dictionary> _apply_state) { @@ -43,7 +52,15 @@ protected override Operation _resource_apply_dense(IVariableV1 var, Tensor grad, { if (_momentum) { - throw new NotImplementedException("_resource_apply_dense"); + var momentum_var = get_slot(var, "momentum"); + return gen_training_ops.resource_apply_keras_momentum( + var.Handle, + momentum_var.Handle, + _get_hyper("learning_rate", var.dtype), + grad, + _get_hyper("momentum", var.dtype), + use_locking: _use_locking, + use_nesterov: nesterov); } var device_dtype = _apply_state.Keys.FirstOrDefault(x => x.Device == var.Device && x.DType == var.dtype.as_base_dtype()); From 6d3f134637308c4a4f01f49ca9e3b0222644a87b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CWanglongzhi2001=E2=80=9D?= <583087864@qq.com> Date: Sat, 29 Jul 2023 15:48:13 +0800 Subject: [PATCH 107/182] fix: remove the reflection in the implemention of Bidirectional --- .../Layers/Rnn/Bidirectional.cs | 31 ++++++++++++------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/Bidirectional.cs b/src/TensorFlowNET.Keras/Layers/Rnn/Bidirectional.cs index 6114d9c7c..0566b08ad 100644 --- 
a/src/TensorFlowNET.Keras/Layers/Rnn/Bidirectional.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/Bidirectional.cs @@ -13,17 +13,17 @@ namespace Tensorflow.Keras.Layers /// public class Bidirectional: Wrapper { - BidirectionalArgs _args; - RNN _forward_layer; - RNN _backward_layer; - RNN _layer; - bool _support_masking = true; int _num_constants = 0; + bool _support_masking = true; bool _return_state; bool _stateful; bool _return_sequences; - InputSpec _input_spec; + BidirectionalArgs _args; RNNArgs _layer_args_copy; + RNN _forward_layer; + RNN _backward_layer; + RNN _layer; + InputSpec _input_spec; public Bidirectional(BidirectionalArgs args):base(args) { _args = args; @@ -66,12 +66,16 @@ public Bidirectional(BidirectionalArgs args):base(args) // Recreate the forward layer from the original layer config, so that it // will not carry over any state from the layer. - var actualType = _layer.GetType(); - if (actualType == typeof(LSTM)) + if (_layer is LSTM) { var arg = _layer_args_copy as LSTMArgs; _forward_layer = new LSTM(arg); } + else if(_layer is SimpleRNN) + { + var arg = _layer_args_copy as SimpleRNNArgs; + _forward_layer = new SimpleRNN(arg); + } // TODO(Wanglongzhi2001), add GRU if case. else { @@ -154,12 +158,18 @@ private RNN _recreate_layer_from_config(RNN layer, bool go_backwards = false) { config.GoBackwards = !config.GoBackwards; } - var actualType = layer.GetType(); - if (actualType == typeof(LSTM)) + + if (layer is LSTM) { var arg = config as LSTMArgs; return new LSTM(arg); } + else if(layer is SimpleRNN) + { + var arg = config as SimpleRNNArgs; + return new SimpleRNN(arg); + } + // TODO(Wanglongzhi2001), add GRU if case. else { return new RNN(cell, config); @@ -183,7 +193,6 @@ public override void build(KerasShapesWrapper input_shape) protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) { // `Bidirectional.call` implements the same API as the wrapped `RNN`. 
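 // Inputs and any initial states/constants are split into a forward and a backward stream
 // before being dispatched to _forward_layer and _backward_layer respectively.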
- Tensors forward_inputs; Tensors backward_inputs; Tensors forward_state; From f5eb4ff0a0950fa1b0c3af9b67950e4f4dc90a1a Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Sat, 26 Aug 2023 10:35:45 +0800 Subject: [PATCH 108/182] fix: partially fix the bug of load_model --- .../ArgsDefinition/Activation/ExponentialArgs.cs | 10 ++++++++++ .../ArgsDefinition/Activation/HardSigmoidArgs.cs | 10 ++++++++++ .../Keras/ArgsDefinition/Activation/SELUArgs.cs | 11 +++++++++++ .../Keras/ArgsDefinition/Activation/SoftplusArgs.cs | 10 ++++++++++ .../Keras/ArgsDefinition/Activation/SoftsignArgs.cs | 10 ++++++++++ .../Keras/ArgsDefinition/Activation/SwishArgs.cs | 10 ++++++++++ .../Keras/ArgsDefinition/Activation/TanhArgs.cs | 10 ++++++++++ .../ArgsDefinition/Convolution/Conv2DTransposeArgs.cs | 10 ++++++++++ .../Keras/ArgsDefinition/Merging/AddArgs.cs | 10 ++++++++++ .../Keras/ArgsDefinition/Merging/ConcatenateArgs.cs | 10 ++++++++++ .../Keras/ArgsDefinition/Merging/SubtractArgs.cs | 10 ++++++++++ .../Pooling/GlobalAveragePooling1DArgs.cs | 10 ++++++++++ .../Pooling/GlobalAveragePooling2DArgs.cs | 10 ++++++++++ .../ArgsDefinition/Pooling/GlobalMaxPooling1DArgs.cs | 10 ++++++++++ .../ArgsDefinition/Pooling/GlobalMaxPooling2DArgs.cs | 10 ++++++++++ .../Keras/ArgsDefinition/Pooling/MaxPooling1DArgs.cs | 10 ++++++++++ 16 files changed, 161 insertions(+) create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/ExponentialArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/HardSigmoidArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SELUArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftplusArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftsignArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SwishArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/TanhArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Convolution/Conv2DTransposeArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/AddArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/ConcatenateArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/SubtractArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling1DArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling2DArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling1DArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling2DArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/MaxPooling1DArgs.cs diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/ExponentialArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/ExponentialArgs.cs new file mode 100644 index 000000000..ef024971d --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/ExponentialArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class ExponentialArgs : LayerArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/HardSigmoidArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/HardSigmoidArgs.cs new file mode 100644 index 000000000..788e0f36d --- 
/dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/HardSigmoidArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class HardSigmoidArgs : LayerArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SELUArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SELUArgs.cs new file mode 100644 index 000000000..eb0e18446 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SELUArgs.cs @@ -0,0 +1,11 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class SELUArgs : LayerArgs + { + + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftplusArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftplusArgs.cs new file mode 100644 index 000000000..7b4f20795 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftplusArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class SoftplusArgs : LayerArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftsignArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftsignArgs.cs new file mode 100644 index 000000000..4e23d261d --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftsignArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class SoftsignArgs : LayerArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SwishArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SwishArgs.cs new file mode 100644 index 000000000..3dea06a23 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SwishArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class SwishArgs : LayerArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/TanhArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/TanhArgs.cs new file mode 100644 index 000000000..5df41b71b --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/TanhArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class TanhArgs : LayerArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Convolution/Conv2DTransposeArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Convolution/Conv2DTransposeArgs.cs new file mode 100644 index 000000000..3daba9465 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Convolution/Conv2DTransposeArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class Conv2DTransposeArgs : Conv2DArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/AddArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/AddArgs.cs new file mode 100644 index 000000000..016d58203 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/AddArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace 
Tensorflow.Keras.ArgsDefinition +{ + public class AddArgs : MergeArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/ConcatenateArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/ConcatenateArgs.cs new file mode 100644 index 000000000..4a81d139d --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/ConcatenateArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class ConcatenateArgs : MergeArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/SubtractArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/SubtractArgs.cs new file mode 100644 index 000000000..1e3621cb6 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/SubtractArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class SubtractArgs : MergeArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling1DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling1DArgs.cs new file mode 100644 index 000000000..e73aff766 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling1DArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class GlobalAveragePooling1DArgs : Pooling1DArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling2DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling2DArgs.cs new file mode 100644 index 000000000..d143cf471 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling2DArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class GlobalAveragePooling2DArgs : Pooling2DArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling1DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling1DArgs.cs new file mode 100644 index 000000000..e03227feb --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling1DArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class GlobalMaxPooling1DArgs : Pooling1DArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling2DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling2DArgs.cs new file mode 100644 index 000000000..a95cac836 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling2DArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class GlobalMaxPooling2DArgs : Pooling2DArgs + { + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/MaxPooling1DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/MaxPooling1DArgs.cs new file mode 100644 index 000000000..4cfff2c15 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/MaxPooling1DArgs.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace 
Tensorflow.Keras.ArgsDefinition +{ + public class MaxPooling1DArgs : Pooling1DArgs + { + } +} From f679af67e61c51bee1aca254f993d6d137df07ff Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Sat, 26 Aug 2023 11:36:41 +0800 Subject: [PATCH 109/182] fix: partially fix the bug of load_model --- .../Layers/LayersApi.Activation.cs | 14 +++++++------- .../Layers/LayersApi.Merging.cs | 2 +- src/TensorFlowNET.Keras/Layers/LayersApi.cs | 18 +++++++++--------- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.Activation.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.Activation.cs index 280e91e2c..2c55f8fd5 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.Activation.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.Activation.cs @@ -10,14 +10,14 @@ public partial class LayersApi { public ILayer ELU ( float alpha = 0.1f ) => new ELU(new ELUArgs { Alpha = alpha }); public ILayer SELU () - => new SELU(new LayerArgs { }); + => new SELU(new SELUArgs { }); public ILayer Softmax(int axis = -1) => new Softmax(new SoftmaxArgs { axis = axis }); public ILayer Softmax ( Axis axis ) => new Softmax(new SoftmaxArgs { axis = axis }); - public ILayer Softplus () => new Softplus(new LayerArgs { }); - public ILayer HardSigmoid () => new HardSigmoid(new LayerArgs { }); - public ILayer Softsign () => new Softsign(new LayerArgs { }); - public ILayer Swish () => new Swish(new LayerArgs { }); - public ILayer Tanh () => new Tanh(new LayerArgs { }); - public ILayer Exponential () => new Exponential(new LayerArgs { }); + public ILayer Softplus () => new Softplus(new SoftplusArgs { }); + public ILayer HardSigmoid () => new HardSigmoid(new HardSigmoidArgs { }); + public ILayer Softsign () => new Softsign(new SoftsignArgs { }); + public ILayer Swish () => new Swish(new SwishArgs { }); + public ILayer Tanh () => new Tanh(new TanhArgs { }); + public ILayer Exponential () => new Exponential(new ExponentialArgs { }); } } diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.Merging.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.Merging.cs index d94bfb4d8..bf06b1418 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.Merging.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.Merging.cs @@ -14,7 +14,7 @@ public partial class LayersApi /// Axis along which to concatenate. /// public ILayer Concatenate(int axis = -1) - => new Concatenate(new MergeArgs + => new Concatenate(new ConcatenateArgs { Axis = axis }); diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index a04a9c051..9155c7742 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -240,7 +240,7 @@ public ILayer Conv2DTranspose(int filters, string kernel_regularizer = null, string bias_regularizer = null, string activity_regularizer = null) - => new Conv2DTranspose(new Conv2DArgs + => new Conv2DTranspose(new Conv2DTransposeArgs { Rank = 2, Filters = filters, @@ -568,7 +568,7 @@ public ILayer MaxPooling1D(int? pool_size = null, int? strides = null, string padding = "valid", string data_format = null) - => new MaxPooling1D(new Pooling1DArgs + => new MaxPooling1D(new MaxPooling1DArgs { PoolSize = pool_size ?? 2, Strides = strides ?? (pool_size ?? 
2), @@ -944,21 +944,21 @@ public ILayer Rescaling(float scale, /// /// public ILayer Add() - => new Add(new MergeArgs { }); + => new Add(new AddArgs { }); /// /// /// /// public ILayer Subtract() - => new Subtract(new MergeArgs { }); + => new Subtract(new SubtractArgs { }); /// /// Global max pooling operation for spatial data. /// /// public ILayer GlobalAveragePooling2D() - => new GlobalAveragePooling2D(new Pooling2DArgs { }); + => new GlobalAveragePooling2D(new GlobalAveragePooling2DArgs { }); /// /// Global average pooling operation for temporal data. @@ -968,7 +968,7 @@ public ILayer GlobalAveragePooling2D() /// /// public ILayer GlobalAveragePooling1D(string data_format = "channels_last") - => new GlobalAveragePooling1D(new Pooling1DArgs { DataFormat = data_format }); + => new GlobalAveragePooling1D(new GlobalAveragePooling1DArgs { DataFormat = data_format }); /// /// Global max pooling operation for spatial data. @@ -977,7 +977,7 @@ public ILayer GlobalAveragePooling1D(string data_format = "channels_last") /// channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, height, width). /// public ILayer GlobalAveragePooling2D(string data_format = "channels_last") - => new GlobalAveragePooling2D(new Pooling2DArgs { DataFormat = data_format }); + => new GlobalAveragePooling2D(new GlobalAveragePooling2DArgs { DataFormat = data_format }); /// /// Global max pooling operation for 1D temporal data. @@ -988,7 +988,7 @@ public ILayer GlobalAveragePooling2D(string data_format = "channels_last") /// /// public ILayer GlobalMaxPooling1D(string data_format = "channels_last") - => new GlobalMaxPooling1D(new Pooling1DArgs { DataFormat = data_format }); + => new GlobalMaxPooling1D(new GlobalMaxPooling1DArgs { DataFormat = data_format }); /// /// Global max pooling operation for spatial data. @@ -997,7 +997,7 @@ public ILayer GlobalMaxPooling1D(string data_format = "channels_last") /// channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, height, width). /// public ILayer GlobalMaxPooling2D(string data_format = "channels_last") - => new GlobalMaxPooling2D(new Pooling2DArgs { DataFormat = data_format }); + => new GlobalMaxPooling2D(new GlobalMaxPooling2DArgs { DataFormat = data_format }); /// /// Get an weights initializer from its name. From 8e3ba22c832e6d34598644686e00182924b08c3a Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Sat, 26 Aug 2023 16:29:28 +0800 Subject: [PATCH 110/182] fix: validate dataset of `Imdb` do not load bug & add: custom `Imdb` path --- src/TensorFlowNET.Keras/Datasets/Imdb.cs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/TensorFlowNET.Keras/Datasets/Imdb.cs b/src/TensorFlowNET.Keras/Datasets/Imdb.cs index 61ce39475..a62f3f87d 100644 --- a/src/TensorFlowNET.Keras/Datasets/Imdb.cs +++ b/src/TensorFlowNET.Keras/Datasets/Imdb.cs @@ -31,7 +31,7 @@ public class Imdb /// /// /// - public DatasetPass load_data(string path = "imdb.npz", + public DatasetPass load_data(string? path = "imdb.npz", int num_words = -1, int skip_top = 0, int maxlen = -1, @@ -42,7 +42,7 @@ public DatasetPass load_data(string path = "imdb.npz", { if (maxlen == -1) throw new InvalidArgumentError("maxlen must be assigned."); - var dst = Download(); + var dst = path ?? 
Download(); var lines = File.ReadAllLines(Path.Combine(dst, "imdb_train.txt")); var x_train_string = new string[lines.Length]; @@ -55,7 +55,7 @@ public DatasetPass load_data(string path = "imdb.npz", var x_train = keras.preprocessing.sequence.pad_sequences(PraseData(x_train_string), maxlen: maxlen); - File.ReadAllLines(Path.Combine(dst, "imdb_test.txt")); + lines = File.ReadAllLines(Path.Combine(dst, "imdb_test.txt")); var x_test_string = new string[lines.Length]; var y_test = np.zeros(new int[] { lines.Length }, np.int64); for (int i = 0; i < lines.Length; i++) From ba1ddb44488bbb2f528065ac2be07e9e6965722e Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Sat, 26 Aug 2023 11:20:12 -0500 Subject: [PATCH 111/182] Set SGD default value. --- src/TensorFlowNET.Core/Keras/IOptimizerApi.cs | 2 +- .../Tensorflow.Binding.csproj | 10 ++--- .../Optimizers/OptimizerApi.cs | 2 +- .../Tensorflow.Keras.csproj | 39 ++++++++++--------- 4 files changed, 28 insertions(+), 25 deletions(-) diff --git a/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs b/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs index 19e3a7b8c..6c15fd469 100644 --- a/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs +++ b/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs @@ -63,6 +63,6 @@ IOptimizer RMSprop(float learning_rate = 0.001f, bool centered = false, string name = "RMSprop"); - IOptimizer SGD(float learning_rate, float momentum); + IOptimizer SGD(float learning_rate = 0.01f, float momentum = 0f); } } diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj index ca5aa47a9..babb52561 100644 --- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj +++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj @@ -5,13 +5,13 @@ Tensorflow.Binding Tensorflow 2.11.0 - 0.110.2 + 0.110.3 10.0 enable Haiping Chen, Eli Belash, Yaohui Liu, Meinrad Recheis SciSharp STACK False - Apache 2.0, Haiping Chen $([System.DateTime]::UtcNow.ToString(yyyy)) + Apache 2.0, Haiping Chen since 2018 https://github.com/SciSharp/TensorFlow.NET git http://scisharpstack.org @@ -20,7 +20,7 @@ Google's TensorFlow full binding in .NET Standard. Building, training and infering deep learning models. https://tensorflownet.readthedocs.io - 0.110.1.0 + 0.110.3.0 tf.net 0.110.x and above are based on tensorflow native 2.11.0 * Support RNN, LSTM model. @@ -43,7 +43,7 @@ https://tensorflownet.readthedocs.io tf.net 0.10x.x aligns with TensorFlow v2.10.x native library. tf.net 0.11x.x aligns with TensorFlow v2.11.x native library. 
- 0.110.2.0 + 0.110.3.0 LICENSE true packages @@ -172,7 +172,7 @@ https://tensorflownet.readthedocs.io - + diff --git a/src/TensorFlowNET.Keras/Optimizers/OptimizerApi.cs b/src/TensorFlowNET.Keras/Optimizers/OptimizerApi.cs index affd43a4f..a237499f9 100644 --- a/src/TensorFlowNET.Keras/Optimizers/OptimizerApi.cs +++ b/src/TensorFlowNET.Keras/Optimizers/OptimizerApi.cs @@ -71,7 +71,7 @@ public IOptimizer RMSprop(float learning_rate = 0.001f, Name = name }); - public IOptimizer SGD(float learning_rate, float momentum) + public IOptimizer SGD(float learning_rate = 0.01f, float momentum = 0f) => new SGD(learning_rate, momentum); } } diff --git a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj index eeb7c559f..36d1bc1d4 100644 --- a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj +++ b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj @@ -7,27 +7,30 @@ enable Tensorflow.Keras AnyCPU;x64 - 0.11.2 + 0.11.3 Haiping Chen Keras for .NET - Apache 2.0, Haiping Chen 2023 + Apache 2.0, Haiping Chen since 2018 TensorFlow.Keras https://github.com/SciSharp/TensorFlow.NET https://avatars3.githubusercontent.com/u/44989469?s=200&v=4 https://github.com/SciSharp/TensorFlow.NET - Keras for .NET is a C# version of Keras ported from the python version. - -* Support CIFAR-10 dataset in keras.datasets. -* Support Conv2D functional API. -* Support BatchNormalization layer. -* Building keras model in subclass, functional and sequential api -* Implemented backward_function. -* Support model.load_weights. -* Add Subtract layer -* Text preprocessing -* Preprocessing.timeseries_dataset_from_array -* Fixed memory leak for YOLOv3 model. -* Support RNN and LSTM models + + Keras for .NET is a C# version of Keras ported from the python version. + + * Support CIFAR-10 dataset in keras.datasets. + * Support Conv2D functional API. + * Support BatchNormalization layer. + * Building keras model in subclass, functional and sequential api + * Implemented backward_function. + * Support model.load_weights. + * Add Subtract layer + * Text preprocessing + * Preprocessing.timeseries_dataset_from_array + * Fixed memory leak for YOLOv3 model. + * Support RNN and LSTM models + * Support Transformer model + Keras for .NET Keras is an API designed for human beings, not machines. Keras follows best practices for reducing cognitive load: it offers consistent & simple APIs, it minimizes the number of user actions required for common use cases, and it provides clear & actionable error messages. @@ -39,8 +42,8 @@ Keras is an API designed for human beings, not machines. Keras follows best prac Git False Open.snk - 0.11.2.0 - 0.11.2.0 + 0.11.3.0 + 0.11.3.0 LICENSE Debug;Release;GPU @@ -140,7 +143,7 @@ Keras is an API designed for human beings, not machines. 
Keras follows best prac - + From 7b077eac7e6a9e60d9d34be9782e222317fbe353 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Mon, 4 Sep 2023 00:05:22 +0800 Subject: [PATCH 112/182] feat: implement GRU layer --- .../Keras/ArgsDefinition/Rnn/GRUArgs.cs | 29 +++ .../ArgsDefinition/Rnn/GRUOptionalArgs.cs | 13 ++ .../Keras/Layers/ILayersApi.cs | 19 ++ src/TensorFlowNET.Keras/Layers/LayersApi.cs | 61 ++++++- src/TensorFlowNET.Keras/Layers/Rnn/GRU.cs | 168 ++++++++++++++++++ src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs | 42 +---- .../Layers/Rnn.Test.cs | 9 + 7 files changed, 300 insertions(+), 41 deletions(-) create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUOptionalArgs.cs create mode 100644 src/TensorFlowNET.Keras/Layers/Rnn/GRU.cs diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUArgs.cs new file mode 100644 index 000000000..cdc3097e9 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUArgs.cs @@ -0,0 +1,29 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class GRUArgs : AutoSerializeLayerArgs + { + public int Units { get; set; } + public Activation Activation { get; set; } + public Activation RecurrentActivation { get; set; } + public bool UseBias { get; set; } = true; + public float Dropout { get; set; } = .0f; + public float RecurrentDropout { get; set; } = .0f; + public IInitializer KernelInitializer { get; set; } + public IInitializer RecurrentInitializer { get; set; } + public IInitializer BiasInitializer { get; set; } + public bool ReturnSequences { get;set; } + public bool ReturnState { get;set; } + public bool GoBackwards { get;set; } + public bool Stateful { get;set; } + public bool Unroll { get;set; } + public bool TimeMajor { get;set; } + public bool ResetAfter { get;set; } + public int Implementation { get; set; } = 2; + + } + +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUOptionalArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUOptionalArgs.cs new file mode 100644 index 000000000..d441dc828 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUOptionalArgs.cs @@ -0,0 +1,13 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition +{ + public class GRUOptionalArgs + { + public string Identifier => "GRU"; + + public Tensor Mask { get; set; } = null; + } +} diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs index b8aff5fb6..5e08eadc4 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs @@ -259,6 +259,25 @@ public IRnnCell GRUCell( float recurrent_dropout = 0f, bool reset_after = true); + public ILayer GRU( + int units, + string activation = "tanh", + string recurrent_activation = "sigmoid", + bool use_bias = true, + string kernel_initializer = "glorot_uniform", + string recurrent_initializer = "orthogonal", + string bias_initializer = "zeros", + float dropout = 0f, + float recurrent_dropout = 0f, + bool return_sequences = false, + bool return_state = false, + bool go_backwards = false, + bool stateful = false, + bool unroll = false, + bool time_major = false, + bool reset_after = true + ); + /// /// Bidirectional wrapper for RNNs. 
/// diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index 9155c7742..928e7e337 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -784,7 +784,7 @@ public IRnnCell LSTMCell(int uints, string recurrent_activation = "sigmoid", bool use_bias = true, string kernel_initializer = "glorot_uniform", - string recurrent_initializer = "orthogonal", // TODO(Wanglongzhi2001),glorot_uniform has not been developed. + string recurrent_initializer = "orthogonal", string bias_initializer = "zeros", bool unit_forget_bias = true, float dropout = 0f, @@ -908,6 +908,65 @@ public IRnnCell GRUCell( ResetAfter = reset_after }); + /// + /// Gated Recurrent Unit - Cho et al. 2014. + /// + /// Positive integer, dimensionality of the output space. + /// Activation function to use. If you pass `None`, no activation is applied.(ie. "linear" activation: `a(x) = x`). + /// Activation function to use for the recurrent step. If you pass `None`, no activation is applied. (ie. "linear" activation: `a(x) = x`). + /// Boolean, (default `True`), whether the layer uses a bias vector. + /// Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. Default: `glorot_uniform`. + /// Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. Default: `orthogonal`. + /// Initializer for the bias vector. Default: `zeros`. + /// Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. Default: 0. + /// Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Default: 0. + /// + /// Boolean. Whether to return the last output in the output sequence, or the full sequence. Default: `False`. + /// Boolean. Whether to return the last state in addition to the output. Default: `False`. + /// Boolean (default `False`). If True, process the input sequence backwards and return the reversed sequence. + /// Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. + /// Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed-up a RNN, + /// The shape format of the `inputs` and `outputs` tensors. + /// GRU convention (whether to apply reset gate after or before matrix multiplication). False = "before", True = "after" (default and cuDNN compatible). 
+ /// + public ILayer GRU( + int units, + string activation = "tanh", + string recurrent_activation = "sigmoid", + bool use_bias = true, + string kernel_initializer = "glorot_uniform", + string recurrent_initializer = "orthogonal", + string bias_initializer = "zeros", + float dropout = 0f, + float recurrent_dropout = 0f, + bool return_sequences = false, + bool return_state = false, + bool go_backwards = false, + bool stateful = false, + bool unroll = false, + bool time_major = false, + bool reset_after = true + ) + => new GRU(new GRUArgs + { + Units = units, + Activation = keras.activations.GetActivationFromName(activation), + RecurrentActivation = keras.activations.GetActivationFromName(recurrent_activation), + KernelInitializer = GetInitializerByName(kernel_initializer), + RecurrentInitializer = GetInitializerByName(recurrent_initializer), + BiasInitializer = GetInitializerByName(bias_initializer), + UseBias = use_bias, + Dropout = dropout, + RecurrentDropout = recurrent_dropout, + ReturnSequences = return_sequences, + ReturnState = return_state, + GoBackwards = go_backwards, + Stateful = stateful, + TimeMajor = time_major, + Unroll = unroll, + ResetAfter = reset_after + }); + public ILayer Bidirectional( ILayer layer, string merge_mode = "concat", diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/GRU.cs b/src/TensorFlowNET.Keras/Layers/Rnn/GRU.cs new file mode 100644 index 000000000..0919883d2 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Rnn/GRU.cs @@ -0,0 +1,168 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Common.Extensions; +using Tensorflow.Common.Types; +using Tensorflow.Keras.Saving; + + +namespace Tensorflow.Keras.Layers +{ + public class GRU : RNN + { + GRUArgs _args; + private static GRUCell _cell; + + bool _return_runtime; + public GRUCell Cell { get => _cell; } + public int units { get => _args.Units; } + public Activation activation { get => _args.Activation; } + public Activation recurrent_activation { get => _args.RecurrentActivation; } + public bool use_bias { get => _args.UseBias; } + public float dropout { get => _args.Dropout; } + public float recurrent_dropout { get => _args.RecurrentDropout; } + public IInitializer kernel_initializer { get => _args.KernelInitializer; } + public IInitializer recurrent_initializer { get => _args.RecurrentInitializer; } + public IInitializer bias_initializer { get => _args.BiasInitializer; } + public int implementation { get => _args.Implementation; } + public bool reset_after { get => _args.ResetAfter; } + + public GRU(GRUArgs args) : base(CreateCell(args), PreConstruct(args)) + { + _args = args; + + if (_args.Implementation == 0) + { + // Use the red output to act as a warning message that can also be used under the release version + Console.ForegroundColor = ConsoleColor.Red; + Console.WriteLine("Warning: `implementation=0` has been deprecated, "+ + "and now defaults to `implementation=2`."+ + "Please update your layer call."); + Console.ResetColor(); + } + + GRUCell cell = new GRUCell(new GRUCellArgs + { + Units = _args.Units, + Activation = _args.Activation, + RecurrentActivation = _args.RecurrentActivation, + UseBias = _args.UseBias, + Dropout = _args.Dropout, + RecurrentDropout = _args.RecurrentDropout, + KernelInitializer = _args.KernelInitializer, + RecurrentInitializer = _args.RecurrentInitializer, + BiasInitializer = _args.BiasInitializer, + ResetAfter = _args.ResetAfter, + Implementation = _args.Implementation + }); + _cell = cell; + } + 
+ protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bool? training = null, IOptionalArgs? optional_args = null) + { + GRUOptionalArgs? gru_optional_args = optional_args as GRUOptionalArgs; + if (optional_args is not null && gru_optional_args is null) + { + throw new ArgumentException("The type of optional args should be `GRUOptionalArgs`."); + } + Tensors? mask = gru_optional_args?.Mask; + + // Not support ragger input temporarily; + int row_length = 0; + bool is_ragged_input = false; + + _validate_args_if_ragged(is_ragged_input, mask); + + // GRU does not support constants.Ignore it during process. + (inputs, initial_state, _) = this._process_inputs(inputs, initial_state, null); + + if (mask.Length > 1) + { + mask = mask[0]; + } + + var input_shape = inputs.shape; + var timesteps = _args.TimeMajor ? input_shape[0] : input_shape[1]; + + + // TODO(Wanglongzhi2001), finish _could_use_gpu_kernel part + Func step = (cell_inputs, cell_states) => + { + var res = Cell.Apply(cell_inputs, cell_states, training is null ? true : training.Value); + var (output, state) = res; + return (output, state); + }; + + var (last_output, outputs, states) = keras.backend.rnn( + step, + inputs, + initial_state, + constants: null, + go_backwards: _args.GoBackwards, + mask: mask, + unroll: _args.Unroll, + input_length: ops.convert_to_tensor(timesteps), + time_major: _args.TimeMajor, + zero_output_for_mask: base.Args.ZeroOutputForMask, + return_all_outputs: _args.ReturnSequences + ); + + Tensors output; + if (_args.ReturnSequences) + { + output = outputs; + } + else + { + output = last_output; + } + + if (_args.ReturnState) + { + output = new Tensors { output, states }; + } + return output; + } + + private static IRnnCell CreateCell(GRUArgs gruArgs) + { + return new GRUCell(new GRUCellArgs + { + Units = gruArgs.Units, + Activation = gruArgs.Activation, + RecurrentActivation = gruArgs.RecurrentActivation, + UseBias = gruArgs.UseBias, + Dropout = gruArgs.Dropout, + RecurrentDropout = gruArgs.RecurrentDropout, + KernelInitializer = gruArgs.KernelInitializer, + RecurrentInitializer = gruArgs.RecurrentInitializer, + BiasInitializer = gruArgs.BiasInitializer, + ResetAfter = gruArgs.ResetAfter, + Implementation = gruArgs.Implementation + }); + } + + private static RNNArgs PreConstruct(GRUArgs args) + { + return new RNNArgs + { + ReturnSequences = args.ReturnSequences, + ReturnState = args.ReturnState, + GoBackwards = args.GoBackwards, + Stateful = args.Stateful, + Unroll = args.Unroll, + TimeMajor = args.TimeMajor, + Units = args.Units, + Activation = args.Activation, + RecurrentActivation = args.RecurrentActivation, + UseBias = args.UseBias, + Dropout = args.Dropout, + RecurrentDropout = args.RecurrentDropout, + KernelInitializer = args.KernelInitializer, + RecurrentInitializer = args.RecurrentInitializer, + BiasInitializer = args.BiasInitializer + }; + } + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs index c19222614..fec75559c 100644 --- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs @@ -25,8 +25,8 @@ public class RNN : RnnBase private RNNArgs _args; private object _input_spec = null; // or NoneValue?? 
private object _state_spec = null; - private Tensors _states = null; private object _constants_spec = null; + private Tensors _states = null; private int _num_constants; protected IVariableV1 _kernel; protected IVariableV1 _bias; @@ -469,7 +469,7 @@ public override Tensors Apply(Tensors inputs, Tensors initial_states = null, boo return (inputs, initial_state, constants); } - private void _validate_args_if_ragged(bool is_ragged_input, Tensors mask) + protected void _validate_args_if_ragged(bool is_ragged_input, Tensors mask) { if (!is_ragged_input) { @@ -528,44 +528,6 @@ public Tensors __call__(Tensors inputs, Tensor state = null, Tensor training = n throw new NotImplementedException(); } - // 好像不能cell不能传接口类型 - //public RNN New(IRnnArgCell cell, - // bool return_sequences = false, - // bool return_state = false, - // bool go_backwards = false, - // bool stateful = false, - // bool unroll = false, - // bool time_major = false) - // => new RNN(new RNNArgs - // { - // Cell = cell, - // ReturnSequences = return_sequences, - // ReturnState = return_state, - // GoBackwards = go_backwards, - // Stateful = stateful, - // Unroll = unroll, - // TimeMajor = time_major - // }); - - //public RNN New(List cell, - // bool return_sequences = false, - // bool return_state = false, - // bool go_backwards = false, - // bool stateful = false, - // bool unroll = false, - // bool time_major = false) - // => new RNN(new RNNArgs - // { - // Cell = cell, - // ReturnSequences = return_sequences, - // ReturnState = return_state, - // GoBackwards = go_backwards, - // Stateful = stateful, - // Unroll = unroll, - // TimeMajor = time_major - // }); - - protected Tensors get_initial_state(Tensors inputs) { var input = inputs[0]; diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs index 03159346a..dbf5cae1e 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs @@ -146,6 +146,15 @@ public void GRUCell() } + [TestMethod] + public void GRU() + { + var inputs = tf.ones((32, 10, 8)); + var gru = tf.keras.layers.GRU(4); + var output = gru.Apply(inputs); + Assert.AreEqual((32, 4), output.shape); + } + [TestMethod] public void Bidirectional() { From 9d10daf30f02ebf078d56aadca59cc269ae23b4d Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Wed, 6 Sep 2023 23:12:00 +0800 Subject: [PATCH 113/182] add reconstruction and setstate of NDArray for loading pickled npy file. 
--- .../NumPy/DtypeConstructor.cs | 55 ++++++++--- .../Implementation/NumPyImpl.Creation.cs | 3 - .../NumPy/Implementation/NumPyImpl.load.cs | 24 ++--- .../NumPy/MultiArrayConstructor.cs | 35 ++++--- .../NumPy/NDArray.Pickle.cs | 99 ++++++++++++++++++- .../NumPy/NDArrayConverter.cs | 1 + src/TensorFlowNET.Core/Numpy/Numpy.cs | 4 +- src/TensorFlowNET.Keras/Datasets/Imdb.cs | 10 +- 8 files changed, 178 insertions(+), 53 deletions(-) diff --git a/src/TensorFlowNET.Core/NumPy/DtypeConstructor.cs b/src/TensorFlowNET.Core/NumPy/DtypeConstructor.cs index f84f408e1..30ef82df4 100644 --- a/src/TensorFlowNET.Core/NumPy/DtypeConstructor.cs +++ b/src/TensorFlowNET.Core/NumPy/DtypeConstructor.cs @@ -16,25 +16,50 @@ class DtypeConstructor : IObjectConstructor { public object construct(object[] args) { - Console.WriteLine("DtypeConstructor"); - Console.WriteLine(args.Length); - for (int i = 0; i < args.Length; i++) - { - Console.WriteLine(args[i]); - } - return new demo(); + var typeCode = (string)args[0]; + TF_DataType dtype; + if (typeCode == "b1") + dtype = np.@bool; + else if (typeCode == "i1") + dtype = np.@byte; + else if (typeCode == "i2") + dtype = np.int16; + else if (typeCode == "i4") + dtype = np.int32; + else if (typeCode == "i8") + dtype = np.int64; + else if (typeCode == "u1") + dtype = np.ubyte; + else if (typeCode == "u2") + dtype = np.uint16; + else if (typeCode == "u4") + dtype = np.uint32; + else if (typeCode == "u8") + dtype = np.uint64; + else if (typeCode == "f4") + dtype = np.float32; + else if (typeCode == "f8") + dtype = np.float64; + else if (typeCode.StartsWith("S")) + dtype = np.@string; + else if (typeCode.StartsWith("O")) + dtype = np.@object; + else + throw new NotSupportedException(); + return new TF_DataType_Warpper(dtype); } } - class demo + public class TF_DataType_Warpper { - public void __setstate__(object[] args) + TF_DataType dtype { get; set; } + public TF_DataType_Warpper(TF_DataType dtype) { - Console.WriteLine("demo __setstate__"); - Console.WriteLine(args.Length); - for (int i = 0; i < args.Length; i++) - { - Console.WriteLine(args[i]); - } + this.dtype = dtype; + } + public void __setstate__(object[] args) { } + public static implicit operator TF_DataType(TF_DataType_Warpper dtypeWarpper) + { + return dtypeWarpper.dtype; } } } diff --git a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs index 80b62198a..7b79f83c6 100644 --- a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs +++ b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs @@ -99,9 +99,6 @@ Array ReadValueMatrix(BinaryReader reader, Array matrix, int bytes, Type type, i NDArray ReadObjectMatrix(BinaryReader reader, Array matrix, int[] shape) { - //int data = reader.ReadByte(); - //Console.WriteLine(data); - //Console.WriteLine(reader.ReadByte()); Stream stream = reader.BaseStream; Unpickler.registerConstructor("numpy.core.multiarray", "_reconstruct", new MultiArrayConstructor()); Unpickler.registerConstructor("numpy", "dtype", new DtypeConstructor()); diff --git a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs index 789f119a1..bbe48e6a4 100644 --- a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs +++ b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs @@ -28,17 +28,17 @@ public Array LoadMatrix(Stream stream) //if (type == typeof(String)) //return ReadStringMatrix(reader, matrix, 
bytes, type, shape); - NDArray res = ReadObjectMatrix(reader, matrix, shape); - Console.WriteLine("LoadMatrix"); - Console.WriteLine(res.dims[0]); - Console.WriteLine((int)res[0][0]); - Console.WriteLine(res.dims[1]); - //if (type == typeof(Object)) - //{ - - //} - //else - return ReadValueMatrix(reader, matrix, bytes, type, shape); + + if (type == typeof(Object)) + { + NDArray res = ReadObjectMatrix(reader, matrix, shape); + // res = res.reconstructedNDArray; + return res.reconstructedArray; + } + else + { + return ReadValueMatrix(reader, matrix, bytes, type, shape); + } } } @@ -133,7 +133,7 @@ Type GetType(string dtype, out int bytes, out bool? isLittleEndian) return typeof(Double); if (typeCode.StartsWith("S")) return typeof(String); - if (typeCode == "O") + if (typeCode.StartsWith("O")) return typeof(Object); throw new NotSupportedException(); diff --git a/src/TensorFlowNET.Core/NumPy/MultiArrayConstructor.cs b/src/TensorFlowNET.Core/NumPy/MultiArrayConstructor.cs index 92927cd5a..43eda23e0 100644 --- a/src/TensorFlowNET.Core/NumPy/MultiArrayConstructor.cs +++ b/src/TensorFlowNET.Core/NumPy/MultiArrayConstructor.cs @@ -3,6 +3,7 @@ using System.Diagnostics.CodeAnalysis; using System.Text; using Razorvine.Pickle; +using Razorvine.Pickle.Objects; namespace Tensorflow.NumPy { @@ -17,28 +18,36 @@ public class MultiArrayConstructor : IObjectConstructor { public object construct(object[] args) { - //Console.WriteLine(args.Length); - //for (int i = 0; i < args.Length; i++) - //{ - // Console.WriteLine(args[i]); - //} - Console.WriteLine("MultiArrayConstructor"); - + if (args.Length != 3) + throw new InvalidArgumentError($"Invalid number of arguments in MultiArrayConstructor._reconstruct. Expected three arguments. Given {args.Length} arguments."); + + var types = (ClassDictConstructor)args[0]; + if (types.module != "numpy" || types.name != "ndarray") + throw new RuntimeError("_reconstruct: First argument must be a sub-type of ndarray"); + var arg1 = (Object[])args[1]; var dims = new int[arg1.Length]; for (var i = 0; i < arg1.Length; i++) { dims[i] = (int)arg1[i]; } + var shape = new Shape(dims); - var dtype = TF_DataType.DtInvalid; - switch (args[2]) + TF_DataType dtype; + string identifier; + if (args[2].GetType() == typeof(string)) + identifier = (string)args[2]; + else + identifier = Encoding.UTF8.GetString((byte[])args[2]); + switch (identifier) { - case "b": dtype = TF_DataType.DtUint8Ref; break; - default: throw new NotImplementedException("cannot parse" + args[2]); + case "u": dtype = np.uint32; break; + case "c": dtype = np.complex_; break; + case "f": dtype = np.float32; break; + case "b": dtype = np.@bool; break; + default: throw new NotImplementedException($"Unsupported data type: {args[2]}"); } - return new NDArray(new Shape(dims), dtype); - + return new NDArray(shape, dtype); } } } diff --git a/src/TensorFlowNET.Core/NumPy/NDArray.Pickle.cs b/src/TensorFlowNET.Core/NumPy/NDArray.Pickle.cs index b4d66243a..62720826a 100644 --- a/src/TensorFlowNET.Core/NumPy/NDArray.Pickle.cs +++ b/src/TensorFlowNET.Core/NumPy/NDArray.Pickle.cs @@ -1,4 +1,7 @@ -using System; +using Newtonsoft.Json.Linq; +using Serilog.Debugging; +using System; +using System.Collections; using System.Collections.Generic; using System.Text; @@ -6,14 +9,100 @@ namespace Tensorflow.NumPy { public partial class NDArray { + public NDArray reconstructedNDArray { get; set; } + public Array reconstructedArray { get; set; } public void __setstate__(object[] args) { - Console.WriteLine("NDArray __setstate__"); - 
Console.WriteLine(args.Length); - for (int i = 0; i < args.Length; i++) + if (args.Length != 5) + throw new InvalidArgumentError($"Invalid number of arguments in NDArray.__setstate__. Expected five arguments. Given {args.Length} arguments."); + + var version = (int)args[0]; // version + + var arg1 = (Object[])args[1]; + var dims = new int[arg1.Length]; + for (var i = 0; i < arg1.Length; i++) + { + dims[i] = (int)arg1[i]; + } + var _ShapeLike = new Shape(dims); // shape + + TF_DataType _DType_co = (TF_DataType_Warpper)args[2]; // DType + + var F_continuous = (bool)args[3]; // F-continuous + if (F_continuous) + throw new InvalidArgumentError("Fortran Continuous memory layout is not supported. Please use C-continuous layout or check the data format."); + + var data = args[4]; // Data + /* + * If we ever need another pickle format, increment the version + * number. But we should still be able to handle the old versions. + */ + if (version < 0 || version > 4) + throw new ValueError($"can't handle version {version} of numpy.dtype pickle"); + + // TODO: Implement the missing details and checks from the official Numpy C code here. + // https://github.com/numpy/numpy/blob/2f0bd6e86a77e4401d0384d9a75edf9470c5deb6/numpy/core/src/multiarray/descriptor.c#L2761 + + if (data.GetType() == typeof(ArrayList)) + { + SetState((ArrayList)data); + } + else + throw new NotImplementedException(""); + } + private void SetState(ArrayList arrayList) + { + int ndim = 1; + var subArrayList = arrayList; + while (subArrayList.Count > 0 && subArrayList[0] != null && subArrayList[0].GetType() == typeof(ArrayList)) + { + subArrayList = (ArrayList)subArrayList[0]; + ndim += 1; + } + var type = subArrayList[0].GetType(); + if (type == typeof(int)) { - Console.WriteLine(args[i]); + if (ndim == 1) + { + int[] list = (int[])arrayList.ToArray(typeof(int)); + Shape shape = new Shape(new int[] { arrayList.Count }); + reconstructedArray = list; + reconstructedNDArray = new NDArray(list, shape); + //SetData(new[] { new Slice() }, new NDArray(list, shape)); + //set_shape(shape); + } + if (ndim == 2) + { + int secondDim = 0; + foreach (ArrayList subArray in arrayList) + { + secondDim = subArray.Count > secondDim ? 
subArray.Count : secondDim; + } + int[,] list = new int[arrayList.Count, secondDim]; + for (int i = 0; i < arrayList.Count; i++) + { + var subArray = (ArrayList?)arrayList[i]; + if (subArray == null) + throw new NullReferenceException(""); + for (int j = 0; j < subArray.Count; j++) + { + var element = subArray[j]; + if (element == null) + throw new NoNullAllowedException("the element of ArrayList cannot be null."); + list[i,j] = (int) element; + } + } + Shape shape = new Shape(new int[] { arrayList.Count, secondDim }); + reconstructedArray = list; + reconstructedNDArray = new NDArray(list, shape); + //SetData(new[] { new Slice() }, new NDArray(list, shape)); + //set_shape(shape); + } + if (ndim > 2) + throw new NotImplementedException("can't handle ArrayList with more than two dimensions."); } + else + throw new NotImplementedException(""); } } } diff --git a/src/TensorFlowNET.Core/NumPy/NDArrayConverter.cs b/src/TensorFlowNET.Core/NumPy/NDArrayConverter.cs index c8c2d45fa..4c64eba74 100644 --- a/src/TensorFlowNET.Core/NumPy/NDArrayConverter.cs +++ b/src/TensorFlowNET.Core/NumPy/NDArrayConverter.cs @@ -10,6 +10,7 @@ public class NDArrayConverter public unsafe static T Scalar(NDArray nd) where T : unmanaged => nd.dtype switch { + TF_DataType.TF_BOOL => Scalar(*(bool*)nd.data), TF_DataType.TF_UINT8 => Scalar(*(byte*)nd.data), TF_DataType.TF_FLOAT => Scalar(*(float*)nd.data), TF_DataType.TF_INT32 => Scalar(*(int*)nd.data), diff --git a/src/TensorFlowNET.Core/Numpy/Numpy.cs b/src/TensorFlowNET.Core/Numpy/Numpy.cs index 72d2e981c..fee2d63fc 100644 --- a/src/TensorFlowNET.Core/Numpy/Numpy.cs +++ b/src/TensorFlowNET.Core/Numpy/Numpy.cs @@ -43,7 +43,9 @@ public partial class np public static readonly TF_DataType @decimal = TF_DataType.TF_DOUBLE; public static readonly TF_DataType complex_ = TF_DataType.TF_COMPLEX; public static readonly TF_DataType complex64 = TF_DataType.TF_COMPLEX64; - public static readonly TF_DataType complex128 = TF_DataType.TF_COMPLEX128; + public static readonly TF_DataType complex128 = TF_DataType.TF_COMPLEX128; + public static readonly TF_DataType @string = TF_DataType.TF_STRING; + public static readonly TF_DataType @object = TF_DataType.TF_VARIANT; #endregion public static double nan => double.NaN; diff --git a/src/TensorFlowNET.Keras/Datasets/Imdb.cs b/src/TensorFlowNET.Keras/Datasets/Imdb.cs index 016b352d9..6808035c6 100644 --- a/src/TensorFlowNET.Keras/Datasets/Imdb.cs +++ b/src/TensorFlowNET.Keras/Datasets/Imdb.cs @@ -70,7 +70,7 @@ namespace Tensorflow.Keras.Datasets public class Imdb { string origin_folder = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"; - string file_name = "imdb.npz"; + string file_name = "simple.npz"; string dest_folder = "imdb"; /// /// Loads the [IMDB dataset](https://ai.stanford.edu/~amaas/data/sentiment/). 
@@ -128,13 +128,15 @@ public DatasetPass load_data(string path = "imdb.npz", (NDArray, NDArray) LoadX(byte[] bytes) { - var y = np.Load_Npz(bytes); - return (y["x_train.npy"], y["x_test.npy"]); + var y = np.Load_Npz(bytes); + var x_train = y["x_train.npy"]; + var x_test = y["x_test.npy"]; + return (x_train, x_test); } (NDArray, NDArray) LoadY(byte[] bytes) { - var y = np.Load_Npz(bytes); + var y = np.Load_Npz(bytes); return (y["y_train.npy"], y["y_test.npy"]); } From ea978bbf214a75ead94c568755255a6f3c6fed58 Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Thu, 7 Sep 2023 21:33:29 +0800 Subject: [PATCH 114/182] optimize code structure of reconstruction ndarray from pickled npy file --- .../Implementation/NumPyImpl.Creation.cs | 12 ++---- .../NumPy/Implementation/NumPyImpl.load.cs | 10 +---- .../NumPy/Pickle/DTypePickleWarpper.cs | 20 ++++++++++ .../NumPy/{ => Pickle}/DtypeConstructor.cs | 17 +------- .../{ => Pickle}/MultiArrayConstructor.cs | 14 +++---- .../MultiArrayPickleWarpper.cs} | 39 ++++++++++++------- src/TensorFlowNET.Core/tensorflow.cs | 6 +++ src/TensorFlowNET.Keras/Datasets/Imdb.cs | 19 +++------ .../Dataset/DatasetTest.cs | 6 +-- 9 files changed, 75 insertions(+), 68 deletions(-) create mode 100644 src/TensorFlowNET.Core/NumPy/Pickle/DTypePickleWarpper.cs rename src/TensorFlowNET.Core/NumPy/{ => Pickle}/DtypeConstructor.cs (77%) rename src/TensorFlowNET.Core/NumPy/{ => Pickle}/MultiArrayConstructor.cs (91%) rename src/TensorFlowNET.Core/NumPy/{NDArray.Pickle.cs => Pickle/MultiArrayPickleWarpper.cs} (77%) diff --git a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs index 7b79f83c6..fa4ef0191 100644 --- a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs +++ b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs @@ -5,6 +5,7 @@ using System.Text; using Tensorflow.Util; using Razorvine.Pickle; +using Tensorflow.NumPy.Pickle; using static Tensorflow.Binding; namespace Tensorflow.NumPy @@ -94,20 +95,15 @@ Array ReadValueMatrix(BinaryReader reader, Array matrix, int bytes, Type type, i var buffer = reader.ReadBytes(bytes * total); System.Buffer.BlockCopy(buffer, 0, matrix, 0, buffer.Length); + return matrix; } - NDArray ReadObjectMatrix(BinaryReader reader, Array matrix, int[] shape) + Array ReadObjectMatrix(BinaryReader reader, Array matrix, int[] shape) { Stream stream = reader.BaseStream; - Unpickler.registerConstructor("numpy.core.multiarray", "_reconstruct", new MultiArrayConstructor()); - Unpickler.registerConstructor("numpy", "dtype", new DtypeConstructor()); - var unpickler = new Unpickler(); - - NDArray result = (NDArray) unpickler.load(stream); - Console.WriteLine(result.dims); - return result; + return (MultiArrayPickleWarpper)unpickler.load(stream); } public (NDArray, NDArray) meshgrid(T[] array, bool copy = true, bool sparse = false) diff --git a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs index bbe48e6a4..199e5ced3 100644 --- a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs +++ b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs @@ -30,17 +30,12 @@ public Array LoadMatrix(Stream stream) //return ReadStringMatrix(reader, matrix, bytes, type, shape); if (type == typeof(Object)) - { - NDArray res = ReadObjectMatrix(reader, matrix, shape); - // res = res.reconstructedNDArray; - return res.reconstructedArray; - } + return ReadObjectMatrix(reader, 
matrix, shape); else { return ReadValueMatrix(reader, matrix, bytes, type, shape); } } - } public T Load(Stream stream) @@ -59,7 +54,7 @@ bool ParseReader(BinaryReader reader, out int bytes, out Type t, out int[] shape shape = null; // The first 6 bytes are a magic string: exactly "x93NUMPY" - if (reader.ReadByte() != 0x93) return false; + if (reader.ReadChar() != 63) return false; if (reader.ReadChar() != 'N') return false; if (reader.ReadChar() != 'U') return false; if (reader.ReadChar() != 'M') return false; @@ -75,7 +70,6 @@ bool ParseReader(BinaryReader reader, out int bytes, out Type t, out int[] shape ushort len = reader.ReadUInt16(); string header = new String(reader.ReadChars(len)); - Console.WriteLine(header); string mark = "'descr': '"; int s = header.IndexOf(mark) + mark.Length; int e = header.IndexOf("'", s + 1); diff --git a/src/TensorFlowNET.Core/NumPy/Pickle/DTypePickleWarpper.cs b/src/TensorFlowNET.Core/NumPy/Pickle/DTypePickleWarpper.cs new file mode 100644 index 000000000..5dff6c16b --- /dev/null +++ b/src/TensorFlowNET.Core/NumPy/Pickle/DTypePickleWarpper.cs @@ -0,0 +1,20 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.NumPy.Pickle +{ + public class DTypePickleWarpper + { + TF_DataType dtype { get; set; } + public DTypePickleWarpper(TF_DataType dtype) + { + this.dtype = dtype; + } + public void __setstate__(object[] args) { } + public static implicit operator TF_DataType(DTypePickleWarpper dTypeWarpper) + { + return dTypeWarpper.dtype; + } + } +} diff --git a/src/TensorFlowNET.Core/NumPy/DtypeConstructor.cs b/src/TensorFlowNET.Core/NumPy/Pickle/DtypeConstructor.cs similarity index 77% rename from src/TensorFlowNET.Core/NumPy/DtypeConstructor.cs rename to src/TensorFlowNET.Core/NumPy/Pickle/DtypeConstructor.cs index 30ef82df4..160c7d4e9 100644 --- a/src/TensorFlowNET.Core/NumPy/DtypeConstructor.cs +++ b/src/TensorFlowNET.Core/NumPy/Pickle/DtypeConstructor.cs @@ -4,7 +4,7 @@ using System.Text; using Razorvine.Pickle; -namespace Tensorflow.NumPy +namespace Tensorflow.NumPy.Pickle { /// /// @@ -46,20 +46,7 @@ public object construct(object[] args) dtype = np.@object; else throw new NotSupportedException(); - return new TF_DataType_Warpper(dtype); - } - } - public class TF_DataType_Warpper - { - TF_DataType dtype { get; set; } - public TF_DataType_Warpper(TF_DataType dtype) - { - this.dtype = dtype; - } - public void __setstate__(object[] args) { } - public static implicit operator TF_DataType(TF_DataType_Warpper dtypeWarpper) - { - return dtypeWarpper.dtype; + return new DTypePickleWarpper(dtype); } } } diff --git a/src/TensorFlowNET.Core/NumPy/MultiArrayConstructor.cs b/src/TensorFlowNET.Core/NumPy/Pickle/MultiArrayConstructor.cs similarity index 91% rename from src/TensorFlowNET.Core/NumPy/MultiArrayConstructor.cs rename to src/TensorFlowNET.Core/NumPy/Pickle/MultiArrayConstructor.cs index 43eda23e0..885f368c4 100644 --- a/src/TensorFlowNET.Core/NumPy/MultiArrayConstructor.cs +++ b/src/TensorFlowNET.Core/NumPy/Pickle/MultiArrayConstructor.cs @@ -5,7 +5,7 @@ using Razorvine.Pickle; using Razorvine.Pickle.Objects; -namespace Tensorflow.NumPy +namespace Tensorflow.NumPy.Pickle { /// /// Creates multiarrays of objects. 
Returns a primitive type multiarray such as int[][] if @@ -18,14 +18,14 @@ public class MultiArrayConstructor : IObjectConstructor { public object construct(object[] args) { - if (args.Length != 3) + if (args.Length != 3) throw new InvalidArgumentError($"Invalid number of arguments in MultiArrayConstructor._reconstruct. Expected three arguments. Given {args.Length} arguments."); - + var types = (ClassDictConstructor)args[0]; - if (types.module != "numpy" || types.name != "ndarray") + if (types.module != "numpy" || types.name != "ndarray") throw new RuntimeError("_reconstruct: First argument must be a sub-type of ndarray"); - - var arg1 = (Object[])args[1]; + + var arg1 = (object[])args[1]; var dims = new int[arg1.Length]; for (var i = 0; i < arg1.Length; i++) { @@ -47,7 +47,7 @@ public object construct(object[] args) case "b": dtype = np.@bool; break; default: throw new NotImplementedException($"Unsupported data type: {args[2]}"); } - return new NDArray(shape, dtype); + return new MultiArrayPickleWarpper(shape, dtype); } } } diff --git a/src/TensorFlowNET.Core/NumPy/NDArray.Pickle.cs b/src/TensorFlowNET.Core/NumPy/Pickle/MultiArrayPickleWarpper.cs similarity index 77% rename from src/TensorFlowNET.Core/NumPy/NDArray.Pickle.cs rename to src/TensorFlowNET.Core/NumPy/Pickle/MultiArrayPickleWarpper.cs index 62720826a..af8d1ecc2 100644 --- a/src/TensorFlowNET.Core/NumPy/NDArray.Pickle.cs +++ b/src/TensorFlowNET.Core/NumPy/Pickle/MultiArrayPickleWarpper.cs @@ -5,12 +5,19 @@ using System.Collections.Generic; using System.Text; -namespace Tensorflow.NumPy +namespace Tensorflow.NumPy.Pickle { - public partial class NDArray + public class MultiArrayPickleWarpper { + public Shape reconstructedShape { get; set; } + public TF_DataType reconstructedDType { get; set; } public NDArray reconstructedNDArray { get; set; } - public Array reconstructedArray { get; set; } + public Array reconstructedMultiArray { get; set; } + public MultiArrayPickleWarpper(Shape shape, TF_DataType dtype) + { + reconstructedShape = shape; + reconstructedDType = dtype; + } public void __setstate__(object[] args) { if (args.Length != 5) @@ -18,7 +25,7 @@ public void __setstate__(object[] args) var version = (int)args[0]; // version - var arg1 = (Object[])args[1]; + var arg1 = (object[])args[1]; var dims = new int[arg1.Length]; for (var i = 0; i < arg1.Length; i++) { @@ -26,7 +33,7 @@ public void __setstate__(object[] args) } var _ShapeLike = new Shape(dims); // shape - TF_DataType _DType_co = (TF_DataType_Warpper)args[2]; // DType + TF_DataType _DType_co = (DTypePickleWarpper)args[2]; // DType var F_continuous = (bool)args[3]; // F-continuous if (F_continuous) @@ -45,12 +52,12 @@ public void __setstate__(object[] args) if (data.GetType() == typeof(ArrayList)) { - SetState((ArrayList)data); + Reconstruct((ArrayList)data); } else throw new NotImplementedException(""); } - private void SetState(ArrayList arrayList) + private void Reconstruct(ArrayList arrayList) { int ndim = 1; var subArrayList = arrayList; @@ -66,10 +73,8 @@ private void SetState(ArrayList arrayList) { int[] list = (int[])arrayList.ToArray(typeof(int)); Shape shape = new Shape(new int[] { arrayList.Count }); - reconstructedArray = list; + reconstructedMultiArray = list; reconstructedNDArray = new NDArray(list, shape); - //SetData(new[] { new Slice() }, new NDArray(list, shape)); - //set_shape(shape); } if (ndim == 2) { @@ -89,14 +94,12 @@ private void SetState(ArrayList arrayList) var element = subArray[j]; if (element == null) throw new NoNullAllowedException("the 
element of ArrayList cannot be null."); - list[i,j] = (int) element; + list[i, j] = (int)element; } } Shape shape = new Shape(new int[] { arrayList.Count, secondDim }); - reconstructedArray = list; + reconstructedMultiArray = list; reconstructedNDArray = new NDArray(list, shape); - //SetData(new[] { new Slice() }, new NDArray(list, shape)); - //set_shape(shape); } if (ndim > 2) throw new NotImplementedException("can't handle ArrayList with more than two dimensions."); @@ -104,5 +107,13 @@ private void SetState(ArrayList arrayList) else throw new NotImplementedException(""); } + public static implicit operator Array(MultiArrayPickleWarpper arrayWarpper) + { + return arrayWarpper.reconstructedMultiArray; + } + public static implicit operator NDArray(MultiArrayPickleWarpper arrayWarpper) + { + return arrayWarpper.reconstructedNDArray; + } } } diff --git a/src/TensorFlowNET.Core/tensorflow.cs b/src/TensorFlowNET.Core/tensorflow.cs index dc4e48da8..e368b37cd 100644 --- a/src/TensorFlowNET.Core/tensorflow.cs +++ b/src/TensorFlowNET.Core/tensorflow.cs @@ -14,6 +14,7 @@ You may obtain a copy of the License at limitations under the License. ******************************************************************************/ +using Razorvine.Pickle; using Serilog; using Serilog.Core; using System.Reflection; @@ -22,6 +23,7 @@ limitations under the License. using Tensorflow.Eager; using Tensorflow.Gradients; using Tensorflow.Keras; +using Tensorflow.NumPy.Pickle; namespace Tensorflow { @@ -98,6 +100,10 @@ public tensorflow() "please visit https://github.com/SciSharp/TensorFlow.NET. If it still not work after installing the backend, please submit an " + "issue to https://github.com/SciSharp/TensorFlow.NET/issues"); } + + // register numpy reconstructor for pickle + Unpickler.registerConstructor("numpy.core.multiarray", "_reconstruct", new MultiArrayConstructor()); + Unpickler.registerConstructor("numpy", "dtype", new DtypeConstructor()); } public string VERSION => c_api.StringPiece(c_api.TF_Version()); diff --git a/src/TensorFlowNET.Keras/Datasets/Imdb.cs b/src/TensorFlowNET.Keras/Datasets/Imdb.cs index 6808035c6..a992ae84a 100644 --- a/src/TensorFlowNET.Keras/Datasets/Imdb.cs +++ b/src/TensorFlowNET.Keras/Datasets/Imdb.cs @@ -5,13 +5,6 @@ using Tensorflow.Keras.Utils; using Tensorflow.NumPy; using System.Linq; -using Google.Protobuf.Collections; -using Microsoft.VisualBasic; -using OneOf.Types; -using static HDF.PInvoke.H5; -using System.Data; -using System.Reflection.Emit; -using System.Xml.Linq; namespace Tensorflow.Keras.Datasets { @@ -70,8 +63,9 @@ namespace Tensorflow.Keras.Datasets public class Imdb { string origin_folder = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"; - string file_name = "simple.npz"; + string file_name = "imdb.npz"; string dest_folder = "imdb"; + /// /// Loads the [IMDB dataset](https://ai.stanford.edu/~amaas/data/sentiment/). 
/// @@ -95,8 +89,9 @@ public DatasetPass load_data(string path = "imdb.npz", { var dst = Download(); var fileBytes = File.ReadAllBytes(Path.Combine(dst, file_name)); - var (x_train, x_test) = LoadX(fileBytes); var (y_train, y_test) = LoadY(fileBytes); + var (x_train, x_test) = LoadX(fileBytes); + /*var lines = File.ReadAllLines(Path.Combine(dst, "imdb_train.txt")); var x_train_string = new string[lines.Length]; var y_train = np.zeros(new int[] { lines.Length }, np.int64); @@ -129,14 +124,12 @@ public DatasetPass load_data(string path = "imdb.npz", (NDArray, NDArray) LoadX(byte[] bytes) { var y = np.Load_Npz(bytes); - var x_train = y["x_train.npy"]; - var x_test = y["x_test.npy"]; - return (x_train, x_test); + return (y["x_train.npy"], y["x_test.npy"]); } (NDArray, NDArray) LoadY(byte[] bytes) { - var y = np.Load_Npz(bytes); + var y = np.Load_Npz(bytes); return (y["y_train.npy"], y["y_test.npy"]); } diff --git a/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs b/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs index 778290bb8..db6252efc 100644 --- a/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs +++ b/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs @@ -1,6 +1,5 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; using System; -using System.Collections.Generic; using System.Linq; using static Tensorflow.Binding; using static Tensorflow.KerasApi; @@ -197,6 +196,7 @@ public void Shuffle() Assert.IsFalse(allEqual); } + [Ignore] [TestMethod] public void GetData() { @@ -209,8 +209,8 @@ public void GetData() var y_val = dataset.Test.Item2; print(len(x_train) + "Training sequences"); print(len(x_val) + "Validation sequences"); - x_train = keras.preprocessing.sequence.pad_sequences((IEnumerable)x_train, maxlen: maxlen); - x_val = keras.preprocessing.sequence.pad_sequences((IEnumerable)x_val, maxlen: maxlen); + //x_train = keras.preprocessing.sequence.pad_sequences((IEnumerable)x_train, maxlen: maxlen); + //x_val = keras.preprocessing.sequence.pad_sequences((IEnumerable)x_val, maxlen: maxlen); } } } From 28c77f53d64dbe78284bf46b00c8c945d76fb31c Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Fri, 8 Sep 2023 17:38:54 +0800 Subject: [PATCH 115/182] implement Imdb dataset loader --- .../NumPy/Implementation/RandomizedImpl.cs | 4 +- src/TensorFlowNET.Keras/Datasets/Imdb.cs | 186 ++++++++++++------ src/TensorFlowNET.Keras/Utils/data_utils.cs | 47 +++++ .../Dataset/DatasetTest.cs | 28 ++- 4 files changed, 198 insertions(+), 67 deletions(-) diff --git a/src/TensorFlowNET.Core/NumPy/Implementation/RandomizedImpl.cs b/src/TensorFlowNET.Core/NumPy/Implementation/RandomizedImpl.cs index 064c7362f..a707e8aae 100644 --- a/src/TensorFlowNET.Core/NumPy/Implementation/RandomizedImpl.cs +++ b/src/TensorFlowNET.Core/NumPy/Implementation/RandomizedImpl.cs @@ -14,9 +14,9 @@ public class RandomizedImpl public NDArray permutation(NDArray x) => new NDArray(random_ops.random_shuffle(x)); [AutoNumPy] - public void shuffle(NDArray x) + public void shuffle(NDArray x, int? 
seed = null) { - var y = random_ops.random_shuffle(x); + var y = random_ops.random_shuffle(x, seed); Marshal.Copy(y.BufferToArray(), 0, x.TensorDataPointer, (int)x.bytesize); } diff --git a/src/TensorFlowNET.Keras/Datasets/Imdb.cs b/src/TensorFlowNET.Keras/Datasets/Imdb.cs index 68364ea67..0266b48bd 100644 --- a/src/TensorFlowNET.Keras/Datasets/Imdb.cs +++ b/src/TensorFlowNET.Keras/Datasets/Imdb.cs @@ -3,8 +3,6 @@ using System.IO; using System.Text; using Tensorflow.Keras.Utils; -using Tensorflow.NumPy; -using System.Linq; namespace Tensorflow.Keras.Datasets { @@ -41,14 +39,14 @@ namespace Tensorflow.Keras.Datasets /// `skip_top` limits will be replaced with this character. /// index_from: int. Index actual words with this index and higher. /// Returns: - /// Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. + /// Tuple of Numpy arrays: `(x_train, labels_train), (x_test, labels_test)`. /// /// ** x_train, x_test**: lists of sequences, which are lists of indexes /// (integers). If the num_words argument was specific, the maximum /// possible index value is `num_words - 1`. If the `maxlen` argument was /// specified, the largest possible sequence length is `maxlen`. /// - /// ** y_train, y_test**: lists of integer labels(1 or 0). + /// ** labels_train, labels_test**: lists of integer labels(1 or 0). /// /// Raises: /// ValueError: in case `maxlen` is so low @@ -63,7 +61,6 @@ namespace Tensorflow.Keras.Datasets public class Imdb { string origin_folder = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"; - string file_name = "imdb.npz"; string dest_folder = "imdb"; /// @@ -78,43 +75,139 @@ public class Imdb /// /// /// - public DatasetPass load_data(string? path = "imdb.npz", - int num_words = -1, + public DatasetPass load_data( + string path = "imdb.npz", + int? num_words = null, int skip_top = 0, - int maxlen = -1, + int? maxlen = null, int seed = 113, - int start_char = 1, - int oov_char= 2, + int? start_char = 1, + int? oov_char = 2, int index_from = 3) { - if (maxlen == -1) throw new InvalidArgumentError("maxlen must be assigned."); - - var dst = path ?? 
Download(); - var fileBytes = File.ReadAllBytes(Path.Combine(dst, file_name)); - var (y_train, y_test) = LoadY(fileBytes); + path = data_utils.get_file( + path, + origin: Path.Combine(origin_folder, "imdb.npz"), + file_hash: "69664113be75683a8fe16e3ed0ab59fda8886cb3cd7ada244f7d9544e4676b9f" + ); + path = Path.Combine(path, "imdb.npz"); + var fileBytes = File.ReadAllBytes(path); var (x_train, x_test) = LoadX(fileBytes); - - /*var lines = File.ReadAllLines(Path.Combine(dst, "imdb_train.txt")); - var x_train_string = new string[lines.Length]; - var y_train = np.zeros(new int[] { lines.Length }, np.int64); - for (int i = 0; i < lines.Length; i++) + var (labels_train, labels_test) = LoadY(fileBytes); + x_test.astype(np.int32); + labels_test.astype(np.int32); + + var indices = np.arange(len(x_train)); + np.random.shuffle(indices, seed); + x_train = x_train[indices]; + labels_train = labels_train[indices]; + + indices = np.arange(len(x_test)); + np.random.shuffle(indices, seed); + x_test = x_test[indices]; + labels_test = labels_test[indices]; + + if (start_char != null) + { + int[,] new_x_train = new int[x_train.shape[0], x_train.shape[1] + 1]; + for (var i = 0; i < x_train.shape[0]; i++) + { + new_x_train[i, 0] = (int)start_char; + for (var j = 0; j < x_train.shape[1]; j++) + { + new_x_train[i, j + 1] = x_train[i][j]; + } + } + int[,] new_x_test = new int[x_test.shape[0], x_test.shape[1] + 1]; + for (var i = 0; i < x_test.shape[0]; i++) + { + new_x_test[i, 0] = (int)start_char; + for (var j = 0; j < x_test.shape[1]; j++) + { + new_x_test[i, j + 1] = x_test[i][j]; + } + } + x_train = new NDArray(new_x_train); + x_test = new NDArray(new_x_test); + } + else if (index_from != 0) + { + for (var i = 0; i < x_train.shape[0]; i++) + { + for (var j = 0; j < x_train.shape[1]; j++) + { + if (x_train[i, j] != 0) + x_train[i, j] += index_from; + } + } + for (var i = 0; i < x_test.shape[0]; i++) + { + for (var j = 0; j < x_test.shape[1]; j++) + { + if (x_test[i, j] != 0) + x_test[i, j] += index_from; + } + } + } + + if (maxlen != null) { - y_train[i] = long.Parse(lines[i].Substring(0, 1)); - x_train_string[i] = lines[i].Substring(2); + (x_train, labels_train) = data_utils._remove_long_seq((int)maxlen, x_train, labels_train); + (x_test, labels_test) = data_utils._remove_long_seq((int)maxlen, x_test, labels_test); + if (x_train.size == 0 || x_test.size == 0) + throw new ValueError("After filtering for sequences shorter than maxlen=" + + $"{maxlen}, no sequence was kept. 
Increase maxlen."); } - var x_train = keras.preprocessing.sequence.pad_sequences(PraseData(x_train_string), maxlen: maxlen); + var xs = np.concatenate(new[] { x_train, x_test }); + var labels = np.concatenate(new[] { labels_train, labels_test }); - lines = File.ReadAllLines(Path.Combine(dst, "imdb_test.txt")); - var x_test_string = new string[lines.Length]; - var y_test = np.zeros(new int[] { lines.Length }, np.int64); - for (int i = 0; i < lines.Length; i++) + if(num_words == null) { - y_test[i] = long.Parse(lines[i].Substring(0, 1)); - x_test_string[i] = lines[i].Substring(2); + num_words = 0; + for (var i = 0; i < xs.shape[0]; i++) + for (var j = 0; j < xs.shape[1]; j++) + num_words = max((int)num_words, (int)xs[i][j]); } - var x_test = np.array(x_test_string);*/ + // by convention, use 2 as OOV word + // reserve 'index_from' (=3 by default) characters: + // 0 (padding), 1 (start), 2 (OOV) + if (oov_char != null) + { + int[,] new_xs = new int[xs.shape[0], xs.shape[1]]; + for(var i = 0; i < xs.shape[0]; i++) + { + for(var j = 0; j < xs.shape[1]; j++) + { + if ((int)xs[i][j] == 0 || skip_top <= (int)xs[i][j] && (int)xs[i][j] < num_words) + new_xs[i, j] = (int)xs[i][j]; + else + new_xs[i, j] = (int)oov_char; + } + } + xs = new NDArray(new_xs); + } + else + { + int[,] new_xs = new int[xs.shape[0], xs.shape[1]]; + for (var i = 0; i < xs.shape[0]; i++) + { + int k = 0; + for (var j = 0; j < xs.shape[1]; j++) + { + if ((int)xs[i][j] == 0 || skip_top <= (int)xs[i][j] && (int)xs[i][j] < num_words) + new_xs[i, k++] = (int)xs[i][j]; + } + } + xs = new NDArray(new_xs); + } + + var idx = len(x_train); + x_train = xs[$"0:{idx}"]; + x_test = xs[$"{idx}:"]; + var y_train = labels[$"0:{idx}"]; + var y_test = labels[$"{idx}:"]; return new DatasetPass { @@ -125,8 +218,8 @@ public DatasetPass load_data(string? path = "imdb.npz", (NDArray, NDArray) LoadX(byte[] bytes) { - var y = np.Load_Npz(bytes); - return (y["x_train.npy"], y["x_test.npy"]); + var x = np.Load_Npz(bytes); + return (x["x_train.npy"], x["x_test.npy"]); } (NDArray, NDArray) LoadY(byte[] bytes) @@ -134,34 +227,5 @@ public DatasetPass load_data(string? path = "imdb.npz", var y = np.Load_Npz(bytes); return (y["y_train.npy"], y["y_test.npy"]); } - - string Download() - { - var dst = Path.Combine(Path.GetTempPath(), dest_folder); - Directory.CreateDirectory(dst); - - Web.Download(origin_folder + file_name, dst, file_name); - - return dst; - // return Path.Combine(dst, file_name); - } - - protected IEnumerable PraseData(string[] x) - { - var data_list = new List(); - for (int i = 0; i < len(x); i++) - { - var list_string = x[i]; - var cleaned_list_string = list_string.Replace("[", "").Replace("]", "").Replace(" ", ""); - string[] number_strings = cleaned_list_string.Split(','); - int[] numbers = new int[number_strings.Length]; - for (int j = 0; j < number_strings.Length; j++) - { - numbers[j] = int.Parse(number_strings[j]); - } - data_list.Add(numbers); - } - return data_list; - } } } diff --git a/src/TensorFlowNET.Keras/Utils/data_utils.cs b/src/TensorFlowNET.Keras/Utils/data_utils.cs index 5b84c601f..16b121b07 100644 --- a/src/TensorFlowNET.Keras/Utils/data_utils.cs +++ b/src/TensorFlowNET.Keras/Utils/data_utils.cs @@ -39,5 +39,52 @@ public static string get_file(string fname, string origin, return datadir; } + + public static (NDArray, NDArray) _remove_long_seq(int maxlen, NDArray seq, NDArray label) + { + /*Removes sequences that exceed the maximum length. + + Args: + maxlen: Int, maximum length of the output sequences. 
+ seq: List of lists, where each sublist is a sequence. + label: List where each element is an integer. + + Returns: + new_seq, new_label: shortened lists for `seq` and `label`. + + */ + List new_seq = new List(); + List new_label = new List(); + + for (var i = 0; i < seq.shape[0]; i++) + { + if (maxlen < seq.shape[1] && seq[i][maxlen] != 0) + continue; + int[] sentence = new int[maxlen]; + for (var j = 0; j < maxlen && j < seq.shape[1]; j++) + { + sentence[j] = seq[i, j]; + } + new_seq.Add(sentence); + new_label.Add(label[i]); + } + + int[,] new_seq_array = new int[new_seq.Count, maxlen]; + int[] new_label_array = new int[new_label.Count]; + + for (var i = 0; i < new_seq.Count; i++) + { + for (var j = 0; j < maxlen; j++) + { + new_seq_array[i, j] = new_seq[i][j]; + } + } + + for (var i = 0; i < new_label.Count; i++) + { + new_label_array[i] = new_label[i]; + } + return (new_seq_array, new_label_array); + } } } diff --git a/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs b/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs index db6252efc..251eeff90 100644 --- a/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs +++ b/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs @@ -1,6 +1,8 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; using System; +using System.Collections.Generic; using System.Linq; +using Tensorflow.NumPy; using static Tensorflow.Binding; using static Tensorflow.KerasApi; @@ -207,10 +209,28 @@ public void GetData() var y_train = dataset.Train.Item2; var x_val = dataset.Test.Item1; var y_val = dataset.Test.Item2; - print(len(x_train) + "Training sequences"); - print(len(x_val) + "Validation sequences"); - //x_train = keras.preprocessing.sequence.pad_sequences((IEnumerable)x_train, maxlen: maxlen); - //x_val = keras.preprocessing.sequence.pad_sequences((IEnumerable)x_val, maxlen: maxlen); + + x_train = keras.preprocessing.sequence.pad_sequences(RemoveZeros(x_train), maxlen: maxlen); + x_val = keras.preprocessing.sequence.pad_sequences(RemoveZeros(x_val), maxlen: maxlen); + print(len(x_train) + " Training sequences"); + print(len(x_val) + " Validation sequences"); + } + IEnumerable RemoveZeros(NDArray data) + { + List new_data = new List(); + for (var i = 0; i < data.shape[0]; i++) + { + List new_array = new List(); + for (var j = 0; j < data.shape[1]; j++) + { + if (data[i][j] == 0) + break; + else + new_array.Add((int)data[i][j]); + } + new_data.Add(new_array.ToArray()); + } + return new_data; } } } From f57a6fe6ed006f79511f4cc9550eeda312b11e98 Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Sat, 9 Sep 2023 18:31:46 +0800 Subject: [PATCH 116/182] optimize the time complexity of Imdb dataset loader --- src/TensorFlowNET.Keras/Datasets/Imdb.cs | 101 ++++++++++-------- src/TensorFlowNET.Keras/Utils/data_utils.cs | 16 +-- .../Dataset/DatasetTest.cs | 11 +- 3 files changed, 71 insertions(+), 57 deletions(-) diff --git a/src/TensorFlowNET.Keras/Datasets/Imdb.cs b/src/TensorFlowNET.Keras/Datasets/Imdb.cs index 0266b48bd..49fc79251 100644 --- a/src/TensorFlowNET.Keras/Datasets/Imdb.cs +++ b/src/TensorFlowNET.Keras/Datasets/Imdb.cs @@ -94,8 +94,6 @@ public DatasetPass load_data( var fileBytes = File.ReadAllBytes(path); var (x_train, x_test) = LoadX(fileBytes); var (labels_train, labels_test) = LoadY(fileBytes); - x_test.astype(np.int32); - labels_test.astype(np.int32); var indices = np.arange(len(x_train)); np.random.shuffle(indices, seed); @@ -107,67 +105,80 @@ public DatasetPass load_data( x_test = x_test[indices]; labels_test = labels_test[indices]; + var x_train_array = 
(int[,])x_train.ToMultiDimArray(); + var x_test_array = (int[,])x_test.ToMultiDimArray(); + var labels_train_array = (long[])labels_train.ToArray(); + var labels_test_array = (long[])labels_test.ToArray(); + if (start_char != null) { - int[,] new_x_train = new int[x_train.shape[0], x_train.shape[1] + 1]; - for (var i = 0; i < x_train.shape[0]; i++) + int[,] new_x_train_array = new int[x_train_array.GetLength(0), x_train_array.GetLength(1) + 1]; + for (var i = 0; i < x_train_array.GetLength(0); i++) { - new_x_train[i, 0] = (int)start_char; - for (var j = 0; j < x_train.shape[1]; j++) + new_x_train_array[i, 0] = (int)start_char; + for (var j = 0; j < x_train_array.GetLength(1); j++) { - new_x_train[i, j + 1] = x_train[i][j]; + if (x_train_array[i, j] == 0) + break; + new_x_train_array[i, j + 1] = x_train_array[i, j]; } } - int[,] new_x_test = new int[x_test.shape[0], x_test.shape[1] + 1]; - for (var i = 0; i < x_test.shape[0]; i++) + int[,] new_x_test_array = new int[x_test_array.GetLength(0), x_test_array.GetLength(1) + 1]; + for (var i = 0; i < x_test_array.GetLength(0); i++) { - new_x_test[i, 0] = (int)start_char; - for (var j = 0; j < x_test.shape[1]; j++) + new_x_test_array[i, 0] = (int)start_char; + for (var j = 0; j < x_test_array.GetLength(1); j++) { - new_x_test[i, j + 1] = x_test[i][j]; + if (x_test_array[i, j] == 0) + break; + new_x_test_array[i, j + 1] = x_test_array[i, j]; } } - x_train = new NDArray(new_x_train); - x_test = new NDArray(new_x_test); + x_train_array = new_x_train_array; + x_test_array = new_x_test_array; } else if (index_from != 0) { - for (var i = 0; i < x_train.shape[0]; i++) + for (var i = 0; i < x_train_array.GetLength(0); i++) { - for (var j = 0; j < x_train.shape[1]; j++) + for (var j = 0; j < x_train_array.GetLength(1); j++) { - if (x_train[i, j] != 0) - x_train[i, j] += index_from; + if (x_train_array[i, j] == 0) + break; + x_train_array[i, j] += index_from; } } - for (var i = 0; i < x_test.shape[0]; i++) + for (var i = 0; i < x_test_array.GetLength(0); i++) { - for (var j = 0; j < x_test.shape[1]; j++) + for (var j = 0; j < x_test_array.GetLength(1); j++) { - if (x_test[i, j] != 0) - x_test[i, j] += index_from; + if (x_test_array[i, j] == 0) + break; + x_test[i, j] += index_from; } } } - if (maxlen != null) + if (maxlen == null) { - (x_train, labels_train) = data_utils._remove_long_seq((int)maxlen, x_train, labels_train); - (x_test, labels_test) = data_utils._remove_long_seq((int)maxlen, x_test, labels_test); - if (x_train.size == 0 || x_test.size == 0) - throw new ValueError("After filtering for sequences shorter than maxlen=" + - $"{maxlen}, no sequence was kept. Increase maxlen."); + maxlen = max(x_train_array.GetLength(1), x_test_array.GetLength(1)); } + (x_train, labels_train) = data_utils._remove_long_seq((int)maxlen, x_train_array, labels_train_array); + (x_test, labels_test) = data_utils._remove_long_seq((int)maxlen, x_test_array, labels_test_array); + if (x_train.size == 0 || x_test.size == 0) + throw new ValueError("After filtering for sequences shorter than maxlen=" + + $"{maxlen}, no sequence was kept. 
Increase maxlen."); var xs = np.concatenate(new[] { x_train, x_test }); var labels = np.concatenate(new[] { labels_train, labels_test }); + var xs_array = (int[,])xs.ToMultiDimArray(); - if(num_words == null) + if (num_words == null) { num_words = 0; - for (var i = 0; i < xs.shape[0]; i++) - for (var j = 0; j < xs.shape[1]; j++) - num_words = max((int)num_words, (int)xs[i][j]); + for (var i = 0; i < xs_array.GetLength(0); i++) + for (var j = 0; j < xs_array.GetLength(1); j++) + num_words = max((int)num_words, (int)xs_array[i, j]); } // by convention, use 2 as OOV word @@ -175,32 +186,32 @@ public DatasetPass load_data( // 0 (padding), 1 (start), 2 (OOV) if (oov_char != null) { - int[,] new_xs = new int[xs.shape[0], xs.shape[1]]; - for(var i = 0; i < xs.shape[0]; i++) + int[,] new_xs_array = new int[xs_array.GetLength(0), xs_array.GetLength(1)]; + for (var i = 0; i < xs_array.GetLength(0); i++) { - for(var j = 0; j < xs.shape[1]; j++) + for (var j = 0; j < xs_array.GetLength(1); j++) { - if ((int)xs[i][j] == 0 || skip_top <= (int)xs[i][j] && (int)xs[i][j] < num_words) - new_xs[i, j] = (int)xs[i][j]; + if (xs_array[i, j] == 0 || skip_top <= xs_array[i, j] && xs_array[i, j] < num_words) + new_xs_array[i, j] = xs_array[i, j]; else - new_xs[i, j] = (int)oov_char; + new_xs_array[i, j] = (int)oov_char; } } - xs = new NDArray(new_xs); + xs = new NDArray(new_xs_array); } else { - int[,] new_xs = new int[xs.shape[0], xs.shape[1]]; - for (var i = 0; i < xs.shape[0]; i++) + int[,] new_xs_array = new int[xs_array.GetLength(0), xs_array.GetLength(1)]; + for (var i = 0; i < xs_array.GetLength(0); i++) { int k = 0; - for (var j = 0; j < xs.shape[1]; j++) + for (var j = 0; j < xs_array.GetLength(1); j++) { - if ((int)xs[i][j] == 0 || skip_top <= (int)xs[i][j] && (int)xs[i][j] < num_words) - new_xs[i, k++] = (int)xs[i][j]; + if (xs_array[i, j] == 0 || skip_top <= xs_array[i, j] && xs_array[i, j] < num_words) + new_xs_array[i, k++] = xs_array[i, j]; } } - xs = new NDArray(new_xs); + xs = new NDArray(new_xs_array); } var idx = len(x_train); diff --git a/src/TensorFlowNET.Keras/Utils/data_utils.cs b/src/TensorFlowNET.Keras/Utils/data_utils.cs index 16b121b07..57ae76695 100644 --- a/src/TensorFlowNET.Keras/Utils/data_utils.cs +++ b/src/TensorFlowNET.Keras/Utils/data_utils.cs @@ -54,23 +54,25 @@ public static (NDArray, NDArray) _remove_long_seq(int maxlen, NDArray seq, NDArr */ List new_seq = new List(); - List new_label = new List(); + List new_label = new List(); - for (var i = 0; i < seq.shape[0]; i++) + var seq_array = (int[,])seq.ToMultiDimArray(); + var label_array = (long[])label.ToArray(); + for (var i = 0; i < seq_array.GetLength(0); i++) { - if (maxlen < seq.shape[1] && seq[i][maxlen] != 0) + if (maxlen < seq_array.GetLength(1) && seq_array[i,maxlen] != 0) continue; int[] sentence = new int[maxlen]; - for (var j = 0; j < maxlen && j < seq.shape[1]; j++) + for (var j = 0; j < maxlen && j < seq_array.GetLength(1); j++) { - sentence[j] = seq[i, j]; + sentence[j] = seq_array[i, j]; } new_seq.Add(sentence); - new_label.Add(label[i]); + new_label.Add(label_array[i]); } int[,] new_seq_array = new int[new_seq.Count, maxlen]; - int[] new_label_array = new int[new_label.Count]; + long[] new_label_array = new long[new_label.Count]; for (var i = 0; i < new_seq.Count; i++) { diff --git a/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs b/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs index 251eeff90..183544ab6 100644 --- a/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs +++ 
b/test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs @@ -204,7 +204,7 @@ public void GetData() { var vocab_size = 20000; // Only consider the top 20k words var maxlen = 200; // Only consider the first 200 words of each movie review - var dataset = keras.datasets.imdb.load_data(num_words: vocab_size); + var dataset = keras.datasets.imdb.load_data(num_words: vocab_size, maxlen: maxlen); var x_train = dataset.Train.Item1; var y_train = dataset.Train.Item2; var x_val = dataset.Test.Item1; @@ -217,16 +217,17 @@ public void GetData() } IEnumerable RemoveZeros(NDArray data) { + var data_array = (int[,])data.ToMultiDimArray(); List new_data = new List(); - for (var i = 0; i < data.shape[0]; i++) + for (var i = 0; i < data_array.GetLength(0); i++) { List new_array = new List(); - for (var j = 0; j < data.shape[1]; j++) + for (var j = 0; j < data_array.GetLength(1); j++) { - if (data[i][j] == 0) + if (data_array[i, j] == 0) break; else - new_array.Add((int)data[i][j]); + new_array.Add(data_array[i, j]); } new_data.Add(new_array.ToArray()); } From 114282885589956a29d7bcd015f55e966cb12532 Mon Sep 17 00:00:00 2001 From: Asaf Agami Date: Sun, 10 Sep 2023 18:09:38 +0300 Subject: [PATCH 117/182] fix: model does not stop on stop_training == true --- src/TensorFlowNET.Keras/Engine/Model.Fit.cs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs index de57f19ae..d6f89d8be 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs @@ -224,6 +224,10 @@ History FitInternal(DataHandler data_handler, int epochs, int validation_step, i GC.Collect(); GC.WaitForPendingFinalizers(); + if (stop_training) + { + break; + } } return callbacks.History; @@ -283,6 +287,10 @@ History FitInternal(DataHandler data_handler, int epochs, int verbose, List Date: Wed, 13 Sep 2023 17:18:43 +0000 Subject: [PATCH 118/182] cached_session for graph tests --- .../ControlFlowTest/WhileContextTestCase.cs | 3 +- .../GradientTest/GradientTest.cs | 21 ++- .../PythonTest.cs | 148 +++++++++++++++++- 3 files changed, 156 insertions(+), 16 deletions(-) diff --git a/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/WhileContextTestCase.cs b/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/WhileContextTestCase.cs index c637cf858..4dee61337 100644 --- a/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/WhileContextTestCase.cs +++ b/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/WhileContextTestCase.cs @@ -1,5 +1,6 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; using System; +using System.Linq; using Tensorflow; using static Tensorflow.Binding; @@ -29,7 +30,7 @@ private void _testWhileContextHelper(int maximum_iterations) var b = new Func(x => math_ops.add(x, 1, name: "c")); //control_flow_ops.while_loop( // c, b, i , maximum_iterations: tf.constant(maximum_iterations)); - foreach (Operation op in sess.graph.get_operations()) + foreach (Operation op in sess.Single().graph.get_operations()) { var control_flow_context = op._get_control_flow_context(); /*if (control_flow_context != null) diff --git a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs index f240817b4..37bc646dd 100644 --- a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs @@ -388,22 +388,19 @@ public void testBoundaryStop() } - [Ignore("TODO")] [TestMethod] public void 
testBoundaryContinue() { - //@test_util.run_v1_only("b/120545219") - //def testBoundaryContinue(self): - // # Test that we differentiate both 'x' and 'y' correctly when x is a - // # predecessor of y. - // with self.cached_session(): - // x = constant(1.0) - // y = x * 2.0 - // z = y * 3.0 - // grads = gradients.gradients(z, [x, y]) - // self.assertTrue(all(x is not None for x in grads)) - // self.assertEqual(6.0, grads[0].eval()) + // Test that we differentiate both 'x' and 'y' correctly when x is a + // predecessor of y. + self.cached_session(); + var x = tf.constant(1.0); + var y = x * 2.0; + var z = y * 3.0; + var grads = tf.gradients(z, new[] { x, y }); + self.assertTrue(all(grads.Select(x => x != null))); + self.assertEqual(6.0, grads[0].eval()); } [Ignore("TODO")] diff --git a/test/TensorFlowNET.Graph.UnitTest/PythonTest.cs b/test/TensorFlowNET.Graph.UnitTest/PythonTest.cs index 513791933..90abc0cc9 100644 --- a/test/TensorFlowNET.Graph.UnitTest/PythonTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/PythonTest.cs @@ -6,6 +6,8 @@ using System.Linq; using Tensorflow; using static Tensorflow.Binding; +using OneOf.Types; +using System.Collections.Generic; namespace TensorFlowNET.UnitTest { @@ -139,6 +141,21 @@ public void assertProtoEquals(object toProto, object o) #region tensor evaluation and test session + private Session _cached_session = null; + private Graph _cached_graph = null; + private object _cached_config = null; + private bool _cached_force_gpu = false; + + private void _ClearCachedSession() + { + if (self._cached_session != null) + { + self._cached_session.Dispose(); + self._cached_session = null; + } + } + + //protected object _eval_helper(Tensor[] tensors) //{ // if (tensors == null) @@ -203,10 +220,57 @@ public T evaluate(Tensor tensor) } } - - public Session cached_session() + ///Returns a TensorFlow Session for use in executing tests. + public IEnumerable cached_session( + Graph graph = null, object config = null, bool use_gpu = false, bool force_gpu = false) { - throw new NotImplementedException(); + // This method behaves differently than self.session(): for performance reasons + // `cached_session` will by default reuse the same session within the same + // test.The session returned by this function will only be closed at the end + // of the test(in the TearDown function). + + // Use the `use_gpu` and `force_gpu` options to control where ops are run.If + // `force_gpu` is True, all ops are pinned to `/ device:GPU:0`. Otherwise, if + // `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as + // possible.If both `force_gpu and `use_gpu` are False, all ops are pinned to + // the CPU. + + // Example: + // python + // class MyOperatorTest(test_util.TensorFlowTestCase) : + // def testMyOperator(self): + // with self.cached_session() as sess: + // valid_input = [1.0, 2.0, 3.0, 4.0, 5.0] + // result = MyOperator(valid_input).eval() + // self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0] + // invalid_input = [-1.0, 2.0, 7.0] + // with self.assertRaisesOpError("negative input not supported"): + // MyOperator(invalid_input).eval() + + + // Args: + // graph: Optional graph to use during the returned session. + // config: An optional config_pb2.ConfigProto to use to configure the + // session. + // use_gpu: If True, attempt to run as many ops as possible on GPU. + // force_gpu: If True, pin all ops to `/device:GPU:0`. + + // Yields: + // A Session object that should be used as a context manager to surround + // the graph building and execution code in a test case. 
+ + + // TODO: + // if context.executing_eagerly(): + // return self._eval_helper(tensors) + // else: + { + var sess = self._get_cached_session( + graph, config, force_gpu, crash_if_inconsistent_args: true); + var cached = self._constrain_devices_and_set_default(sess, use_gpu, force_gpu); + return cached; + + } } //Returns a TensorFlow Session for use in executing tests. @@ -254,6 +318,40 @@ public Session session(Graph graph = null, object config = null, bool use_gpu = return s.as_default(); } + private IEnumerable _constrain_devices_and_set_default(Session sess, bool use_gpu, bool force_gpu) + { + // Set the session and its graph to global default and constrain devices.""" + // if context.executing_eagerly(): + // yield None + // else: + { + sess.graph.as_default(); + sess.as_default(); + { + if (force_gpu) + { + // TODO: + + // Use the name of an actual device if one is detected, or + // '/device:GPU:0' otherwise + /* var gpu_name = gpu_device_name(); + if (!gpu_name) + gpu_name = "/device:GPU:0" + using (sess.graph.device(gpu_name)) { + yield return sess; + }*/ + yield return sess; + } + else if (use_gpu) + yield return sess; + else + using (sess.graph.device("/device:CPU:0")) + yield return sess; + } + + } + } + // See session() for details. private Session _create_session(Graph graph, object cfg, bool forceGpu) { @@ -298,6 +396,50 @@ private Session _create_session(Graph graph, object cfg, bool forceGpu) return new Session(graph);//, config = prepare_config(config)) } + private Session _get_cached_session( + Graph graph = null, + object config = null, + bool force_gpu = false, + bool crash_if_inconsistent_args = true) + { + // See cached_session() for documentation. + if (self._cached_session == null) + { + var sess = self._create_session(graph, config, force_gpu); + self._cached_session = sess; + self._cached_graph = graph; + self._cached_config = config; + self._cached_force_gpu = force_gpu; + return sess; + } else { + + if (crash_if_inconsistent_args && !self._cached_graph.Equals(graph)) + throw new ValueError(@"The graph used to get the cached session is + different than the one that was used to create the + session. Maybe create a new session with + self.session()"); + if (crash_if_inconsistent_args && !self._cached_config.Equals(config)) { + throw new ValueError(@"The config used to get the cached session is + different than the one that was used to create the + session. Maybe create a new session with + self.session()"); + } + if (crash_if_inconsistent_args && !self._cached_force_gpu.Equals(force_gpu)) { + throw new ValueError(@"The force_gpu value used to get the cached session is + different than the one that was used to create the + session. 
Maybe create a new session with
+ self.session()");
+ }
+ return _cached_session;
+ }
+ }
+
+ [TestCleanup]
+ public void Cleanup()
+ {
+ _ClearCachedSession();
+ }
+
 #endregion

 public void AssetSequenceEqual<T>(T[] a, T[] b)
From ae50fa93bac27f9c7c77b7a38289f20d78480b3a Mon Sep 17 00:00:00 2001
From: Alexander Novikov
Date: Thu, 14 Sep 2023 03:58:15 +0000
Subject: [PATCH 119/182] fix flaky test boundary continue

---
 test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs
index 37bc646dd..0b4d79bb7 100644
--- a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs
+++ b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs
@@ -394,7 +394,7 @@ public void testBoundaryContinue()

 // Test that we differentiate both 'x' and 'y' correctly when x is a
 // predecessor of y.
- var sess = self.cached_session().Single(); - var x = tf.constant(1.0); - var y = x * 2.0; - var z = y * 3.0; - var grads = tf.gradients(z, new[] { x, y }); - self.assertTrue(all(grads.Select(x => x != null))); - self.assertEqual(6.0, grads[0].eval()); + using (self.cached_session()) + { + var x = tf.constant(1.0); + var y = x * 2.0; + var z = y * 3.0; + var grads = tf.gradients(z, new[] { x, y }); + self.assertTrue(all(grads.Select(x => x != null))); + self.assertEqual(6.0, grads[0].eval()); + } } [Ignore("TODO")] diff --git a/test/TensorFlowNET.Graph.UnitTest/PythonTest.cs b/test/TensorFlowNET.Graph.UnitTest/PythonTest.cs index 90abc0cc9..ccf59f5ae 100644 --- a/test/TensorFlowNET.Graph.UnitTest/PythonTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/PythonTest.cs @@ -221,7 +221,7 @@ public T evaluate(Tensor tensor) } ///Returns a TensorFlow Session for use in executing tests. - public IEnumerable cached_session( + public Session cached_session( Graph graph = null, object config = null, bool use_gpu = false, bool force_gpu = false) { // This method behaves differently than self.session(): for performance reasons @@ -267,9 +267,8 @@ public IEnumerable cached_session( { var sess = self._get_cached_session( graph, config, force_gpu, crash_if_inconsistent_args: true); - var cached = self._constrain_devices_and_set_default(sess, use_gpu, force_gpu); - return cached; - + using var cached = self._constrain_devices_and_set_default(sess, use_gpu, force_gpu); + return cached; } } @@ -318,13 +317,12 @@ public Session session(Graph graph = null, object config = null, bool use_gpu = return s.as_default(); } - private IEnumerable _constrain_devices_and_set_default(Session sess, bool use_gpu, bool force_gpu) + private Session _constrain_devices_and_set_default(Session sess, bool use_gpu, bool force_gpu) { // Set the session and its graph to global default and constrain devices.""" - // if context.executing_eagerly(): - // yield None - // else: - { + if (tf.executing_eagerly()) + return null; + else { sess.graph.as_default(); sess.as_default(); { @@ -340,13 +338,13 @@ private IEnumerable _constrain_devices_and_set_default(Session sess, bo using (sess.graph.device(gpu_name)) { yield return sess; }*/ - yield return sess; + return sess; } else if (use_gpu) - yield return sess; + return sess; else using (sess.graph.device("/device:CPU:0")) - yield return sess; + return sess; } } From adef5bcdc518d879ca385d37fe17ce5b2a329c44 Mon Sep 17 00:00:00 2001 From: Alexander Novikov Date: Thu, 14 Sep 2023 15:37:16 +0000 Subject: [PATCH 121/182] gradient tests --- .../GradientTest/GradientTest.cs | 383 +++++++++++------- 1 file changed, 236 insertions(+), 147 deletions(-) diff --git a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs index 099c11627..b0827f2ab 100644 --- a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs @@ -5,6 +5,7 @@ using System.Linq; using Tensorflow; using static Tensorflow.Binding; +using Tensorflow.Framework; namespace TensorFlowNET.UnitTest.Gradient { @@ -394,6 +395,8 @@ public void testBoundaryContinue() // Test that we differentiate both 'x' and 'y' correctly when x is a // predecessor of y. 
+ //TODO: @test_util.run_v1_only("b/120545219") + using (self.cached_session()) { var x = tf.constant(1.0); @@ -402,66 +405,61 @@ public void testBoundaryContinue() var grads = tf.gradients(z, new[] { x, y }); self.assertTrue(all(grads.Select(x => x != null))); self.assertEqual(6.0, grads[0].eval()); - } + } } - [Ignore("TODO")] [TestMethod] public void testAggregationMethodAccumulateN() { + //TODO: @test_util.run_v1_only("b/120545219") - //@test_util.run_v1_only("b/120545219") - //def testAggregationMethodAccumulateN(self): - // with self.cached_session(): - // x = constant(1.0) - // y = x * 2.0 - // z = y + y + y + y + y + y + y + y + y + y - // grads = gradients.gradients( - // z, [x, y], - // aggregation_method=gradients.AggregationMethod. - // EXPERIMENTAL_ACCUMULATE_N) - // self.assertTrue(all(x is not None for x in grads)) - // self.assertEqual(20.0, grads[0].eval()) - // self.assertEqual(10.0, grads[1].eval()) - + using (self.cached_session()) + { + var x = tf.constant(1.0); + var y = x * 2.0; + var z = y + y + y + y + y + y + y + y + y + y; + var grads = tf.gradients(z, new[] { x, y }, + aggregation_method: AggregationMethod.EXPERIMENTAL_ACCUMULATE_N); + self.assertTrue(all(grads.Select(x => x != null))); + self.assertEqual(20.0, grads[0].eval()); + self.assertEqual(10.0, grads[1].eval()); + } } - [Ignore("TODO")] [TestMethod] public void testAggregationMethodAddN() { - //@test_util.run_v1_only("b/120545219") - //def testAggregationMethodAddN(self): - // with self.cached_session(): - // x = constant(1.0) - // y = x * 2.0 - // z = y + y + y + y + y + y + y + y + y + y - // grads = gradients.gradients( - // z, [x, y], aggregation_method=gradients.AggregationMethod.ADD_N) - // self.assertTrue(all(x is not None for x in grads)) - // self.assertEqual(20.0, grads[0].eval()) - // self.assertEqual(10.0, grads[1].eval()) - + //TODO: @test_util.run_v1_only("b/120545219") + using (self.cached_session()) + { + var x = tf.constant(1.0); + var y = x * 2.0; + var z = y + y + y + y + y + y + y + y + y + y; + var grads = tf.gradients(z, new[] { x, y }, + aggregation_method: AggregationMethod.ADD_N); + self.assertTrue(grads.All(x => x != null)); + self.assertEqual(20.0, grads[0].eval()); + self.assertEqual(10.0, grads[1].eval()); + } } - [Ignore("TODO")] [TestMethod] public void testAggregationMethodTree() { - //@test_util.run_v1_only("b/120545219") - //def testAggregationMethodTree(self): - // with self.cached_session(): - // x = constant(1.0) - // y = x * 2.0 - // z = y + y + y + y + y + y + y + y + y + y - // grads = gradients.gradients( - // z, [x, y], - // aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE) - // self.assertTrue(all(x is not None for x in grads)) - // self.assertEqual(20.0, grads[0].eval()) - // self.assertEqual(10.0, grads[1].eval()) + //TODO: @test_util.run_v1_only("b/120545219") + using (self.cached_session()) + { + var x = tf.constant(1.0); + var y = x * 2.0; + var z = y + y + y + y + y + y + y + y + y + y; + var grads = tf.gradients(z, new[] { x, y }, + aggregation_method: AggregationMethod.EXPERIMENTAL_TREE); + self.assertTrue(grads.All(x => x != null)); + self.assertEqual(20.0, grads[0].eval()); + self.assertEqual(10.0, grads[1].eval()); + } } [Ignore("TODO")] @@ -490,24 +488,32 @@ public void testNoGradientForStringOutputs() // self.assertTrue(isinstance(grads[0], ops.Tensor)) } - [Ignore("TODO")] [TestMethod] public void testSingletonIndexedSlices() { + tf.Graph().as_default(); + + var x = tf.placeholder(TF_DataType.TF_FLOAT); + var y = tf.identity(x); + 
var dy_indices = tf.placeholder(TF_DataType.TF_INT32); + var dy_values = tf.placeholder(TF_DataType.TF_FLOAT); + Tensor dy = new IndexedSlices(dy_values, dy_indices); + var dx = tf.gradients(new[] { y }, new[] { x }, grad_ys: new[] { dy })[0]; + // The IndexedSlices gradient of tf.identity is the identity map. + using (var sess = self.cached_session()) + { + var feed_dict = new FeedItem[] + { + ( x, new Tensor(new float[] { 1.0f }) ), + (dy_indices, new Tensor(new int[] { 0 })), + (dy_values, new Tensor(new float[] { 2.0f })) + }; + var result = sess.run(new[] { dx, dy }, feed_dict); + var vdx = result[0]; + var vdy = result[1]; + self.assertEqual(vdx, vdy); + } - //def testSingletonIndexedSlices(self): - // with ops.Graph().as_default(): - // x = array_ops.placeholder(dtypes.float32) - // y = array_ops.identity(x) - // dy = ops.IndexedSlices( - // array_ops.placeholder(dtypes.float32), - // array_ops.placeholder(dtypes.int32)) - // dx, = gradients.gradients(y, x, grad_ys=dy) - // # The IndexedSlices gradient of tf.identity is the identity map. - // with self.cached_session() as sess: - // vdx, vdy = sess.run( - // [dx, dy], feed_dict={x: [1.0], dy.indices: [0], dy.values: [2.0]}) - // self.assertEqual(vdx, vdy) } [Ignore("TODO")] @@ -575,26 +581,25 @@ public void testVariableRefGradient() // self.assertIsNotNone(gradient) } - [Ignore("TODO")] [TestMethod] public void testDependentYs() { - //@test_util.run_v1_only("b/120545219") - //def testDependentYs(self): - // with self.cached_session(): - // x = constant_op.constant(3.0) - // y = math_ops.square(x) - // y1 = math_ops.square(y) - // y2 = math_ops.square(y1) - // g = gradients.gradients([y, y2], x) - // self.assertAllClose(17502.0, g[0].eval()) - // g = gradients.gradients(y + y2, x) - // self.assertAllClose(17502.0, g[0].eval()) - // z = array_ops.identity(y) - // z2 = array_ops.identity(y2) - // g = gradients.gradients([z, z2], x) - // self.assertAllClose(17502.0, g[0].eval()) - + //TODO: @test_util.run_v1_only("b/120545219") + using (self.cached_session()) + { + var x = constant_op.constant(3.0); + var y = math_ops.square(x); + var y1 = math_ops.square(y); + var y2 = math_ops.square(y1); + var g = tf.gradients(new[] { y, y2 }, new[] { x }); + self.assertAllClose(17502.0, g[0].eval()); + g = tf.gradients(y + y2, x); + self.assertAllClose(17502.0, g[0].eval()); + var z = array_ops.identity(y); + var z2 = array_ops.identity(y2); + g = tf.gradients(new[] { z, z2 }, new[] { x }); + self.assertAllClose(17502.0, g[0].eval()); + } } [Ignore("TODO")] @@ -602,75 +607,152 @@ public void testDependentYs() public void testPartialDerivatives() { - //@test_util.run_v1_only("b/120545219") - //def testPartialDerivatives(self): - // with self.cached_session(): - // x = constant_op.constant(1.) 
- // y = 2 * x - // z = x + y - // totalg = gradients.gradients(z, [x, y]) - // self.assertEqual([3.0, 1.0], [g.eval() for g in totalg]) - // partialg = gradients.gradients(z, [x, y], stop_gradients=[x, y]) - // self.assertEqual([1.0, 1.0], [g.eval() for g in partialg]) + //TODO: @test_util.run_v1_only("b/120545219") + using (self.cached_session()) + { + var x = tf.constant(1.0); + var y = 2 * x; + var z = x + y; + var totalg = tf.gradients(z, new[] { x, y }); + self.assertEqual(new[] { 3.0, 1.0 }, totalg.Select(g => g.eval())); + var partialg = tf.gradients(z, new[] { x, y }, stop_gradients: new[] { x, y }); + self.assertEqual(new[] { 1.0, 1.0 }, partialg.Select(g => g.eval())); + } } - [Ignore("TODO")] + // TODO: remove when np.testing.assert_allclose(a, b) is implemented + private class CollectionComparer : System.Collections.IComparer + { + private readonly double _epsilon = 1e-07; + + public int Compare(object x, object y) + { + var a = (double)x; + var b = (double)y; + + double delta = Math.Abs(a - b); + if (delta < _epsilon) + { + return 0; + } + return a.CompareTo(b); + } + } + + private struct Case + { + public Tensor[] grad1; + public Tensor[] grad2; + public string constants; + public string variables; + } + + [Ignore("FIXME")] [TestMethod] public void testStopGradients() { + + //TODO: @test_util.run_v1_only("b/120545219") + Dictionary makeGraph(RandomizedImpl rng, string stop_gradients) + { + Tensor functionOf(Tensor[] xs, int k) + { + var shape = new Shape(k, k); + // TODO: replace by DefaultIfEmpty() before Aggregate(). + if (!xs.Any()) + { + return rng.random(shape).astype(np.float32); + } + return xs.Select(x => gen_math_ops.mat_mul(rng.random(shape).astype(np.float32), x)) + .Aggregate((t1, t2) => t1 + t2) + + rng.random(shape).astype(np.float32); + } + var a = functionOf(Array.Empty(), 3); + if (stop_gradients.Contains('a')) a = array_ops.stop_gradient(a); + var b = functionOf(new Tensor[] { a }, 3); + if (stop_gradients.Contains('b')) b = array_ops.stop_gradient(b); + var c = functionOf(new Tensor[] { a, b }, 3); + if (stop_gradients.Contains('c')) c = array_ops.stop_gradient(c); + var d = functionOf(new Tensor[] { b, c }, 3); + if (stop_gradients.Contains('d')) d = array_ops.stop_gradient(d); - //@test_util.run_v1_only("b/120545219") - //def testStopGradients(self): - // def _MakeGraph(rng, stop_gradients=()): - // def _FunctionOf(xs, k=3): - // return ops.convert_to_tensor( - // sum(math_ops.matmul(rng.rand(k, k), x) for x in xs) - // + rng.rand(k, k)) - - // a = _FunctionOf([]) - // if "a" in stop_gradients: a = array_ops.stop_gradient(a) - // b = _FunctionOf([a]) - // if "b" in stop_gradients: b = array_ops.stop_gradient(b) - // c = _FunctionOf([a, b]) - // if "c" in stop_gradients: c = array_ops.stop_gradient(c) - // d = _FunctionOf([b, c]) - // if "d" in stop_gradients: d = array_ops.stop_gradient(d) - // return dict(a=a, b=b, c=c, d=d) - - // def _Gradients(ys, xs, **kwargs): - // dydxs = gradients.gradients(ys, xs, **kwargs) - // dydxs = [0. 
* x if dydx is None else dydx - // for x, dydx in zip(xs, dydxs)] - // return dydxs - // seed = np.random.randint(1000) - // cases = [] - // subsets = [""] + "a b c d ab ac ad bc bd cd abc abd acd bcd abcd".split() - // graph = _MakeGraph(np.random.RandomState(seed)) - // for constants in subsets: - // graph_with_stops = _MakeGraph(np.random.RandomState(seed), constants) - // for variables_ in subsets: - // # compute the gradient when stopped using tf.stop_gradients - // grad1 = _Gradients([graph_with_stops["d"]], - // [graph_with_stops[v] for v in variables_]) - // # compute the gradient when stopped using the stop_gradients kwarg - // grad2 = _Gradients([graph["d"]], - // [graph[v] for v in variables_], - // stop_gradients=[graph[v] for v in constants]) - // cases.append(dict(grad1=grad1, grad2=grad2, - // constants=constants, variables=variables_)) - - // # evaluate all tensors in one call to session.run for speed - // with self.cached_session() as sess: - // results = sess.run([(case["grad1"], case["grad2"]) for case in cases]) - - // for (npgrad1, npgrad2), case in zip(results, cases): - // for a, b in zip(npgrad1, npgrad2): - // np.testing.assert_allclose(a, b) + return new Dictionary + { + { 'a', a }, + { 'b', b }, + { 'c', c }, + { 'd', d } + }; + } + + Tensor[] gradients(Tensor[] ys, Tensor[] xs, Tensor[] stop_gradients = null) + { + var dydxs = tf.gradients(ys, xs, stop_gradients); + dydxs = dydxs.Select((dydx, i) => dydx == null ? xs[i] * 0 : dydx).ToArray(); + return dydxs; + } + + var seed = np.random.randint(1000); + // TODO: remove next line when np.random.RandomState implemented. + tf.set_random_seed(seed); + var cases = new List(); + // TODO: add "" case. + var subsets = new List { "" }.Concat("a b c d ab ac ad bc bd cd abc abd acd bcd abcd".Split()); + // TODO: pass np.random.RandomState(seed) instead of np.random + var graph = makeGraph(np.random, string.Empty); + foreach (var constants in subsets) + { + var graphWithStops = makeGraph(np.random, constants); + foreach (var variables_ in subsets) + { + // compute the gradient when stopped using tf.stop_gradients + var grad1 = gradients( + new[] { graphWithStops['d'] }, + variables_.ToCharArray().Select(v => graphWithStops[v]).ToArray() + ); + // compute the gradient when stopped using the stop_gradients from args + var grad2 = gradients( + new[] { graph['d'] }, + variables_.ToCharArray().Select(v => graph[v]).ToArray(), + constants.ToCharArray().Select(c => graph[c]).DefaultIfEmpty(null)?.ToArray() + ); + cases.Add(new Case + { + grad1 = grad1, + grad2 = grad2, + variables = variables_, + constants = constants, + }) ; + } + } + // evaluate all tensors in one call to session.run for speed + using (var sess = self.cached_session()) + { + var results = sess.run( + cases.Select(case_ => ( + case_.grad1, + case_.grad2 + )).ToArray() + ); + + foreach (var (result, case_) in results.Zip(cases)) + { + var npgrad1 = result[0]; + var npgrad2 = result[1]; + foreach (var (a, b) in npgrad1.Zip(npgrad2)) + { + // TODO: np.testing.assert_allclose(a, b); + CollectionAssert.AreEqual(a.ToArray(), b.ToArray(), new CollectionComparer()); + } + } + } } - [Ignore("TODO")] + + + [Ignore("TODO: Unconnected gradients are not implemented")] [TestMethod] public void testUnconnectedGradientsNoneUnconnectedGradients() { @@ -685,7 +767,7 @@ public void testUnconnectedGradientsNoneUnconnectedGradients() // self.assertIsNone(grad[0]) } - [Ignore("TODO")] + [Ignore("TODO: Unconnected gradients are not implemented")] [TestMethod] public void 
testUnconnectedGradientsZerosUnconnectedGradients() { @@ -699,15 +781,21 @@ public void testUnconnectedGradientsZerosUnconnectedGradients() // [y], [x], unconnected_gradients="zero") // with self.cached_session() as sess: // self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(grads)[0]) + + // tf.Graph().as_default(); + // var x = tf.constant(1.0, shape: new long[] { 2, 2 }); + // var y = tf.constant(3.0, shape: new long[] { 3, 1 }); + // var grads = tf.gradients(new[] { y }, new[] { x }, unconnected_gradients: "zero"); + // using (self.cached_session()) + // { + // self.assertAllEqual(new[,] { { 0.0, 0.0 }, { 0.0, 0.0 } }, self.evaluate(grads)[0]); + // } } - [Ignore("TODO")] + [Ignore("TODO: Unconnected gradients are not implemented")] [TestMethod] public void testUnconnectedGradientsZeroConnectedGradients() { - - - //def testUnconnectedGradientsZeroConnectedGradients(self): // with ops.Graph().as_default(): // x = constant(1.0) @@ -716,9 +804,19 @@ public void testUnconnectedGradientsZeroConnectedGradients() // [y], [x], unconnected_gradients="zero") // with self.cached_session() as sess: // self.assertEquals(3.0, self.evaluate(grad)[0]) + + // tf.Graph().as_default(); + + // var x = tf.constant(1.0f); + // var y = x * 3.0f; + // var grad = tf.gradients(new [] { y }, new [] { x }, unconnected_gradients: "zero"); + // using (var sess = tf.Session()) + // { + // self.assertEquals(3.0, self.evaluate(grad)[0]); + // } } - [Ignore("TODO")] + [Ignore("TODO: Unconnected gradients are not implemented")] [TestMethod] public void testUnknownUnconnectedGradientsValueGiven() { @@ -729,15 +827,6 @@ public void testUnknownUnconnectedGradientsValueGiven() // with self.assertRaisesRegexp( // ValueError, "Unknown value for unconnected_gradients: 'nonsense'"): // gradients.gradients([y], [x], unconnected_gradients="nonsense") - } - - - - /* - - - - */ } } From a9dad3ce1114aa0b140472782d2ea4e36331107d Mon Sep 17 00:00:00 2001 From: Alexander Novikov Date: Thu, 14 Sep 2023 15:47:39 +0000 Subject: [PATCH 122/182] fixme labels --- test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs index b0827f2ab..3ce6661cc 100644 --- a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs @@ -488,6 +488,7 @@ public void testNoGradientForStringOutputs() // self.assertTrue(isinstance(grads[0], ops.Tensor)) } + [Ignore("FIXME")] [TestMethod] public void testSingletonIndexedSlices() { From 628b2ce7366329f03390c4fffb9a8c779bb75663 Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Fri, 15 Sep 2023 20:36:52 +0800 Subject: [PATCH 123/182] optimize temporal complexity of Imdb dataset loader --- src/TensorFlowNET.Keras/Datasets/Imdb.cs | 48 +++++++++------------ src/TensorFlowNET.Keras/Utils/data_utils.cs | 14 +++--- 2 files changed, 27 insertions(+), 35 deletions(-) diff --git a/src/TensorFlowNET.Keras/Datasets/Imdb.cs b/src/TensorFlowNET.Keras/Datasets/Imdb.cs index 49fc79251..081c26cb9 100644 --- a/src/TensorFlowNET.Keras/Datasets/Imdb.cs +++ b/src/TensorFlowNET.Keras/Datasets/Imdb.cs @@ -116,23 +116,13 @@ public DatasetPass load_data( for (var i = 0; i < x_train_array.GetLength(0); i++) { new_x_train_array[i, 0] = (int)start_char; - for (var j = 0; j < x_train_array.GetLength(1); j++) - { - if (x_train_array[i, j] == 0) - break; - new_x_train_array[i, j + 1] 
= x_train_array[i, j]; - } + Array.Copy(x_train_array, i * x_train_array.GetLength(1), new_x_train_array, i * new_x_train_array.GetLength(1) + 1, x_train_array.GetLength(1)); } int[,] new_x_test_array = new int[x_test_array.GetLength(0), x_test_array.GetLength(1) + 1]; for (var i = 0; i < x_test_array.GetLength(0); i++) { new_x_test_array[i, 0] = (int)start_char; - for (var j = 0; j < x_test_array.GetLength(1); j++) - { - if (x_test_array[i, j] == 0) - break; - new_x_test_array[i, j + 1] = x_test_array[i, j]; - } + Array.Copy(x_test_array, i * x_test_array.GetLength(1), new_x_test_array, i * new_x_test_array.GetLength(1) + 1, x_test_array.GetLength(1)); } x_train_array = new_x_train_array; x_test_array = new_x_test_array; @@ -163,15 +153,19 @@ public DatasetPass load_data( { maxlen = max(x_train_array.GetLength(1), x_test_array.GetLength(1)); } - (x_train, labels_train) = data_utils._remove_long_seq((int)maxlen, x_train_array, labels_train_array); - (x_test, labels_test) = data_utils._remove_long_seq((int)maxlen, x_test_array, labels_test_array); - if (x_train.size == 0 || x_test.size == 0) + (x_train_array, labels_train_array) = data_utils._remove_long_seq((int)maxlen, x_train_array, labels_train_array); + (x_test_array, labels_test_array) = data_utils._remove_long_seq((int)maxlen, x_test_array, labels_test_array); + if (x_train_array.Length == 0 || x_test_array.Length == 0) throw new ValueError("After filtering for sequences shorter than maxlen=" + $"{maxlen}, no sequence was kept. Increase maxlen."); - var xs = np.concatenate(new[] { x_train, x_test }); - var labels = np.concatenate(new[] { labels_train, labels_test }); - var xs_array = (int[,])xs.ToMultiDimArray(); + int[,] xs_array = new int[x_train_array.GetLength(0) + x_test_array.GetLength(0), (int)maxlen]; + Array.Copy(x_train_array, xs_array, x_train_array.Length); + Array.Copy(x_test_array, 0, xs_array, x_train_array.Length, x_train_array.Length); + + long[] labels_array = new long[labels_train_array.Length + labels_test_array.Length]; + Array.Copy(labels_train_array, labels_array, labels_train_array.Length); + Array.Copy(labels_test_array, 0, labels_array, labels_train_array.Length, labels_test_array.Length); if (num_words == null) { @@ -197,7 +191,7 @@ public DatasetPass load_data( new_xs_array[i, j] = (int)oov_char; } } - xs = new NDArray(new_xs_array); + xs_array = new_xs_array; } else { @@ -211,19 +205,19 @@ public DatasetPass load_data( new_xs_array[i, k++] = xs_array[i, j]; } } - xs = new NDArray(new_xs_array); + xs_array = new_xs_array; } - var idx = len(x_train); - x_train = xs[$"0:{idx}"]; - x_test = xs[$"{idx}:"]; - var y_train = labels[$"0:{idx}"]; - var y_test = labels[$"{idx}:"]; + Array.Copy(xs_array, x_train_array, x_train_array.Length); + Array.Copy(xs_array, x_train_array.Length, x_test_array, 0, x_train_array.Length); + + Array.Copy(labels_array, labels_train_array, labels_train_array.Length); + Array.Copy(labels_array, labels_train_array.Length, labels_test_array, 0, labels_test_array.Length); return new DatasetPass { - Train = (x_train, y_train), - Test = (x_test, y_test) + Train = (x_train_array, labels_train_array), + Test = (x_test_array, labels_test_array) }; } diff --git a/src/TensorFlowNET.Keras/Utils/data_utils.cs b/src/TensorFlowNET.Keras/Utils/data_utils.cs index 57ae76695..e6db0ef72 100644 --- a/src/TensorFlowNET.Keras/Utils/data_utils.cs +++ b/src/TensorFlowNET.Keras/Utils/data_utils.cs @@ -40,7 +40,7 @@ public static string get_file(string fname, string origin, return datadir; } - public static 
(NDArray, NDArray) _remove_long_seq(int maxlen, NDArray seq, NDArray label) + public static (int[,], long[]) _remove_long_seq(int maxlen, int[,] seq, long[] label) { /*Removes sequences that exceed the maximum length. @@ -56,19 +56,17 @@ public static (NDArray, NDArray) _remove_long_seq(int maxlen, NDArray seq, NDArr List new_seq = new List(); List new_label = new List(); - var seq_array = (int[,])seq.ToMultiDimArray(); - var label_array = (long[])label.ToArray(); - for (var i = 0; i < seq_array.GetLength(0); i++) + for (var i = 0; i < seq.GetLength(0); i++) { - if (maxlen < seq_array.GetLength(1) && seq_array[i,maxlen] != 0) + if (maxlen < seq.GetLength(1) && seq[i, maxlen] != 0) continue; int[] sentence = new int[maxlen]; - for (var j = 0; j < maxlen && j < seq_array.GetLength(1); j++) + for (var j = 0; j < maxlen && j < seq.GetLength(1); j++) { - sentence[j] = seq_array[i, j]; + sentence[j] = seq[i, j]; } new_seq.Add(sentence); - new_label.Add(label_array[i]); + new_label.Add(label[i]); } int[,] new_seq_array = new int[new_seq.Count, maxlen]; From 57feb65dbc96fbe383d3dec1cee05bd3f34bb292 Mon Sep 17 00:00:00 2001 From: Alexander Novikov Date: Fri, 15 Sep 2023 14:57:48 +0000 Subject: [PATCH 124/182] comment IndexedSlices test --- .../GradientTest/GradientTest.cs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs index 3ce6661cc..fc2280051 100644 --- a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs @@ -488,17 +488,20 @@ public void testNoGradientForStringOutputs() // self.assertTrue(isinstance(grads[0], ops.Tensor)) } - [Ignore("FIXME")] + [Ignore("TODO: CompositeTensors are not supported yet.")] [TestMethod] public void testSingletonIndexedSlices() { tf.Graph().as_default(); + // TODO: uncomment when CompositeTensors are supported. + /* var x = tf.placeholder(TF_DataType.TF_FLOAT); var y = tf.identity(x); var dy_indices = tf.placeholder(TF_DataType.TF_INT32); var dy_values = tf.placeholder(TF_DataType.TF_FLOAT); - Tensor dy = new IndexedSlices(dy_values, dy_indices); + var dy = new IndexedSlices(dy_values, dy_indices); + var dx = tf.gradients(new[] { y }, new[] { x }, grad_ys: new[] { dy })[0]; // The IndexedSlices gradient of tf.identity is the identity map. 
using (var sess = self.cached_session()) @@ -514,6 +517,7 @@ public void testSingletonIndexedSlices() var vdy = result[1]; self.assertEqual(vdx, vdy); } + */ } From 56e389154cc3252888761b7bb7c931e4dbe88064 Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Mon, 18 Sep 2023 14:21:09 +0800 Subject: [PATCH 125/182] improve unpickler speed with BufferedStream --- .../NumPy/Implementation/NumPyImpl.Creation.cs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs index fa4ef0191..c0f9e695d 100644 --- a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs +++ b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs @@ -101,9 +101,10 @@ Array ReadValueMatrix(BinaryReader reader, Array matrix, int bytes, Type type, i Array ReadObjectMatrix(BinaryReader reader, Array matrix, int[] shape) { - Stream stream = reader.BaseStream; + Stream deflateStream = reader.BaseStream; + BufferedStream bufferedStream = new BufferedStream(deflateStream); var unpickler = new Unpickler(); - return (MultiArrayPickleWarpper)unpickler.load(stream); + return (MultiArrayPickleWarpper)unpickler.load(bufferedStream); } public (NDArray, NDArray) meshgrid(T[] array, bool copy = true, bool sparse = false) From 725ec1e55f83bae6e4745ddf0605bd15c40fbd92 Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Mon, 18 Sep 2023 03:05:00 -0500 Subject: [PATCH 126/182] Optimize imdb.load_data --- src/TensorFlowNET.Keras/Datasets/Imdb.cs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/TensorFlowNET.Keras/Datasets/Imdb.cs b/src/TensorFlowNET.Keras/Datasets/Imdb.cs index 081c26cb9..1c9805189 100644 --- a/src/TensorFlowNET.Keras/Datasets/Imdb.cs +++ b/src/TensorFlowNET.Keras/Datasets/Imdb.cs @@ -180,10 +180,11 @@ public DatasetPass load_data( // 0 (padding), 1 (start), 2 (OOV) if (oov_char != null) { - int[,] new_xs_array = new int[xs_array.GetLength(0), xs_array.GetLength(1)]; - for (var i = 0; i < xs_array.GetLength(0); i++) + var (d1, d2) = (xs_array.GetLength(0), xs_array.GetLength(1)); + int[,] new_xs_array = new int[d1, d2]; + for (var i = 0; i < d1; i++) { - for (var j = 0; j < xs_array.GetLength(1); j++) + for (var j = 0; j < d2; j++) { if (xs_array[i, j] == 0 || skip_top <= xs_array[i, j] && xs_array[i, j] < num_words) new_xs_array[i, j] = xs_array[i, j]; @@ -195,11 +196,12 @@ public DatasetPass load_data( } else { - int[,] new_xs_array = new int[xs_array.GetLength(0), xs_array.GetLength(1)]; - for (var i = 0; i < xs_array.GetLength(0); i++) + var (d1, d2) = (xs_array.GetLength(0), xs_array.GetLength(1)); + int[,] new_xs_array = new int[d1, d2]; + for (var i = 0; i < d1; i++) { int k = 0; - for (var j = 0; j < xs_array.GetLength(1); j++) + for (var j = 0; j < d2; j++) { if (xs_array[i, j] == 0 || skip_top <= xs_array[i, j] && xs_array[i, j] < num_words) new_xs_array[i, k++] = xs_array[i, j]; From 9552d4cb7a51ea0081be027e15645dca11ea1239 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Thu, 21 Sep 2023 21:54:49 +0800 Subject: [PATCH 127/182] feat: add np.less and np.greater binding --- src/TensorFlowNET.Core/NumPy/Numpy.Math.cs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs b/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs index 5bc97952b..2559638b3 100644 --- a/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs +++ b/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs @@ -85,5 
+85,11 @@ public static NDArray dot(NDArray x1, NDArray x2, NDArray? axes = null, string? [AutoNumPy] public static NDArray add(NDArray x, NDArray y) => new NDArray(math_ops.add(x, y)); + + [AutoNumPy] + public static NDArray greater(NDArray x, NDArray y) => new NDArray(tf.greater(x, y)); + + [AutoNumPy] + public static NDArray less(NDArray x, NDArray y) => new NDArray(tf.less(x, y)); } } From f809f6eacee83336ac7971d018686b7ee8999198 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Thu, 21 Sep 2023 21:56:22 +0800 Subject: [PATCH 128/182] fix: fix EarlyStopping --- .../Callbacks/Earlystopping.cs | 64 ++++++++++++------- 1 file changed, 42 insertions(+), 22 deletions(-) diff --git a/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs b/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs index 36993b637..a2a2ecfe2 100644 --- a/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs +++ b/src/TensorFlowNET.Keras/Callbacks/Earlystopping.cs @@ -19,8 +19,10 @@ public class EarlyStopping: ICallback string _monitor; string _mode; bool _restore_best_weights; - List? _best_weights; + List? _best_weights; CallbackParams _parameters; + Func _monitor_op; + public Dictionary>? history { get; set; } // user need to pass a CallbackParams to EarlyStopping, CallbackParams at least need the model public EarlyStopping(CallbackParams parameters,string monitor = "val_loss", float min_delta = 0f, int patience = 0, @@ -38,17 +40,49 @@ public EarlyStopping(CallbackParams parameters,string monitor = "val_loss", floa _min_delta = Math.Abs(min_delta); _restore_best_weights = restore_best_weights; _mode = mode; - if (mode != "auto" && mode != "min" && mode != "max") + + if (_mode != "auto" && _mode != "min" && _mode != "max") + { + Console.WriteLine($"EarlyStopping mode {_mode} is unknown, fallback to auto mode."); + _mode = "auto"; + } + + if (_mode == "min") + { + _monitor_op = np.less; + } + else if (_mode == "max") + { + _monitor_op = np.greater; + } + else + { + if (_monitor.EndsWith("acc") || _monitor.EndsWith("accuracy") || _monitor.EndsWith("auc")) + { + _monitor_op = np.greater; + } + else + { + _monitor_op = np.less; + } + } + + if (_monitor_op == np.greater) { - Console.WriteLine("EarlyStopping mode %s is unknown, fallback to auto mode.", mode); + _min_delta *= 1; + } + else + { + _min_delta *= -1; } } public void on_train_begin() { _wait = 0; _stopped_epoch = 0; + _best = _monitor_op == np.less ? (float)np.Inf : (float)-np.Inf; + _best_weights = null; _best_epoch = 0; - _best = (float)np.Inf; } public void on_epoch_begin(int epoch) @@ -74,7 +108,7 @@ public void on_epoch_end(int epoch, Dictionary epoch_logs) // Restore the weights after first epoch if no progress is ever made. if (_restore_best_weights && _best_weights == null) { - _best_weights = _parameters.Model.Weights; + _best_weights = _parameters.Model.get_weights(); } _wait += 1; @@ -83,7 +117,7 @@ public void on_epoch_end(int epoch, Dictionary epoch_logs) _best = current; _best_epoch = epoch; if (_restore_best_weights) - _best_weights = _parameters.Model.TrainableWeights; + _best_weights = _parameters.Model.get_weights(); // Only restart wait if we beat both the baseline and our previous best. 
if (_baseline == 0f || _is_improvement(current, _baseline)) _wait = 0; @@ -99,7 +133,7 @@ public void on_epoch_end(int epoch, Dictionary epoch_logs) { Console.WriteLine($"Restoring model weights from the end of the best epoch: {_best_epoch + 1}"); } - _parameters.Model.Weights = _best_weights; + _parameters.Model.set_weights(_best_weights); } } } @@ -131,21 +165,7 @@ float get_monitor_value(Dictionary logs) } public bool _is_improvement(float monitor_value, float reference_value) { - bool less_op = (monitor_value - _min_delta) < reference_value; - bool greater_op = (monitor_value - _min_delta) >= reference_value; - if (_mode == "min") - return less_op; - else if (_mode == "max") - return greater_op; - else - { - if (_monitor.EndsWith("acc") || _monitor.EndsWith("accuracy") || _monitor.EndsWith("auc")) - { - return greater_op; - } - else - return less_op; - } + return _monitor_op(monitor_value - _min_delta, reference_value); } public void on_test_end(Dictionary logs) From 9fb847991a1e45c0dbf40fd896b36b6d91953a24 Mon Sep 17 00:00:00 2001 From: lingbai-kong Date: Fri, 22 Sep 2023 18:34:08 +0800 Subject: [PATCH 129/182] fix: adjust imdb dataset loader for faster loading speed --- src/TensorFlowNET.Keras/Datasets/Imdb.cs | 29 ++++++++++++--------- src/TensorFlowNET.Keras/Utils/data_utils.cs | 8 +++--- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/src/TensorFlowNET.Keras/Datasets/Imdb.cs b/src/TensorFlowNET.Keras/Datasets/Imdb.cs index 1c9805189..4d6df913b 100644 --- a/src/TensorFlowNET.Keras/Datasets/Imdb.cs +++ b/src/TensorFlowNET.Keras/Datasets/Imdb.cs @@ -112,35 +112,39 @@ public DatasetPass load_data( if (start_char != null) { - int[,] new_x_train_array = new int[x_train_array.GetLength(0), x_train_array.GetLength(1) + 1]; - for (var i = 0; i < x_train_array.GetLength(0); i++) + var (d1, d2) = (x_train_array.GetLength(0), x_train_array.GetLength(1)); + int[,] new_x_train_array = new int[d1, d2 + 1]; + for (var i = 0; i < d1; i++) { new_x_train_array[i, 0] = (int)start_char; - Array.Copy(x_train_array, i * x_train_array.GetLength(1), new_x_train_array, i * new_x_train_array.GetLength(1) + 1, x_train_array.GetLength(1)); + Array.Copy(x_train_array, i * d2, new_x_train_array, i * (d2 + 1) + 1, d2); } - int[,] new_x_test_array = new int[x_test_array.GetLength(0), x_test_array.GetLength(1) + 1]; - for (var i = 0; i < x_test_array.GetLength(0); i++) + (d1, d2) = (x_test_array.GetLength(0), x_test_array.GetLength(1)); + int[,] new_x_test_array = new int[d1, d2 + 1]; + for (var i = 0; i < d1; i++) { new_x_test_array[i, 0] = (int)start_char; - Array.Copy(x_test_array, i * x_test_array.GetLength(1), new_x_test_array, i * new_x_test_array.GetLength(1) + 1, x_test_array.GetLength(1)); + Array.Copy(x_test_array, i * d2, new_x_test_array, i * (d2 + 1) + 1, d2); } x_train_array = new_x_train_array; x_test_array = new_x_test_array; } else if (index_from != 0) { - for (var i = 0; i < x_train_array.GetLength(0); i++) + var (d1, d2) = (x_train_array.GetLength(0), x_train_array.GetLength(1)); + for (var i = 0; i < d1; i++) { - for (var j = 0; j < x_train_array.GetLength(1); j++) + for (var j = 0; j < d2; j++) { if (x_train_array[i, j] == 0) break; x_train_array[i, j] += index_from; } } - for (var i = 0; i < x_test_array.GetLength(0); i++) + (d1, d2) = (x_test_array.GetLength(0), x_test_array.GetLength(1)); + for (var i = 0; i < d1; i++) { - for (var j = 0; j < x_test_array.GetLength(1); j++) + for (var j = 0; j < d2; j++) { if (x_test_array[i, j] == 0) break; @@ -169,9 +173,10 @@ public 
DatasetPass load_data( if (num_words == null) { + var (d1, d2) = (xs_array.GetLength(0), xs_array.GetLength(1)); num_words = 0; - for (var i = 0; i < xs_array.GetLength(0); i++) - for (var j = 0; j < xs_array.GetLength(1); j++) + for (var i = 0; i < d1; i++) + for (var j = 0; j < d2; j++) num_words = max((int)num_words, (int)xs_array[i, j]); } diff --git a/src/TensorFlowNET.Keras/Utils/data_utils.cs b/src/TensorFlowNET.Keras/Utils/data_utils.cs index e6db0ef72..b0bc15540 100644 --- a/src/TensorFlowNET.Keras/Utils/data_utils.cs +++ b/src/TensorFlowNET.Keras/Utils/data_utils.cs @@ -53,15 +53,17 @@ public static (int[,], long[]) _remove_long_seq(int maxlen, int[,] seq, long[] l new_seq, new_label: shortened lists for `seq` and `label`. */ + var nRow = seq.GetLength(0); + var nCol = seq.GetLength(1); List new_seq = new List(); List new_label = new List(); - for (var i = 0; i < seq.GetLength(0); i++) + for (var i = 0; i < nRow; i++) { - if (maxlen < seq.GetLength(1) && seq[i, maxlen] != 0) + if (maxlen < nCol && seq[i, maxlen] != 0) continue; int[] sentence = new int[maxlen]; - for (var j = 0; j < maxlen && j < seq.GetLength(1); j++) + for (var j = 0; j < maxlen && j < nCol; j++) { sentence[j] = seq[i, j]; } From eb4c1f4fb01bb02b7c7f87d5bee958bd9d4b0e42 Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Sat, 23 Sep 2023 20:57:48 -0500 Subject: [PATCH 130/182] Release v0.110.4. --- src/TensorFlowNET.Core/Tensorflow.Binding.csproj | 9 +++++---- src/TensorFlowNET.Keras/Tensorflow.Keras.csproj | 6 +++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj index be714618d..85c41bd2a 100644 --- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj +++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj @@ -5,7 +5,7 @@ Tensorflow.Binding Tensorflow 2.11.0 - 0.110.3 + 0.110.4 10.0 enable Haiping Chen, Eli Belash, Yaohui Liu, Meinrad Recheis @@ -25,7 +25,8 @@ https://tensorflownet.readthedocs.io tf.net 0.110.x and above are based on tensorflow native 2.11.0 * Support RNN, LSTM model. * Support Transformer model. - + * Added IMDB dataset. + tf.net 0.100.x and above are based on tensorflow native 2.10.0 * Eager Mode is added finally. @@ -43,7 +44,7 @@ https://tensorflownet.readthedocs.io tf.net 0.10x.x aligns with TensorFlow v2.10.x native library. tf.net 0.11x.x aligns with TensorFlow v2.11.x native library. - 0.110.3.0 + 0.110.4.0 LICENSE true packages @@ -174,7 +175,7 @@ https://tensorflownet.readthedocs.io - + diff --git a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj index 36d1bc1d4..a0ee22284 100644 --- a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj +++ b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj @@ -7,7 +7,7 @@ enable Tensorflow.Keras AnyCPU;x64 - 0.11.3 + 0.11.4 Haiping Chen Keras for .NET Apache 2.0, Haiping Chen since 2018 @@ -42,8 +42,8 @@ Keras is an API designed for human beings, not machines. 
Keras follows best prac Git False Open.snk - 0.11.3.0 - 0.11.3.0 + 0.11.4.0 + 0.11.4.0 LICENSE Debug;Release;GPU From 21210795d0fb7963c13fb99604b7e7e46df2443d Mon Sep 17 00:00:00 2001 From: Alexander Novikov Date: Wed, 27 Sep 2023 13:16:28 +0000 Subject: [PATCH 131/182] gradient descent tests --- .../Variables/variables.py.cs | 7 +- .../GradientTest/GradientTest.cs | 2 - test/TensorFlowNET.UnitTest/PythonTest.cs | 178 +++++++++++++++++- .../Training/GradientDescentOptimizerTests.cs | 68 +++++++ 4 files changed, 250 insertions(+), 5 deletions(-) create mode 100644 test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs diff --git a/src/TensorFlowNET.Core/Variables/variables.py.cs b/src/TensorFlowNET.Core/Variables/variables.py.cs index 0c07e0243..f3ae248e6 100644 --- a/src/TensorFlowNET.Core/Variables/variables.py.cs +++ b/src/TensorFlowNET.Core/Variables/variables.py.cs @@ -72,7 +72,9 @@ public static List global_variables(string scope = null) public static Operation variables_initializer(IVariableV1[] var_list, string name = "init") { if (var_list.Length > 0) + { return control_flow_ops.group(var_list.Select(x => x.Initializer).ToArray(), name); + } else return gen_control_flow_ops.no_op(name: name); } @@ -155,7 +157,10 @@ public static Operation _safe_initial_value_from_op(string name, Operation op, D public static Tensor global_variables_initializer() { - throw new NotImplementedException(); + // if context.executing_eagerly(): + // return control_flow_ops.no_op(name = "global_variables_initializer") + var group = variables_initializer(global_variables().ToArray()); + return group; } } } diff --git a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs index fc2280051..e2d6db912 100644 --- a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs @@ -776,8 +776,6 @@ public void testUnconnectedGradientsNoneUnconnectedGradients() [TestMethod] public void testUnconnectedGradientsZerosUnconnectedGradients() { - - //def testUnconnectedGradientsZerosUnconnectedGradients(self): // with ops.Graph().as_default(): // x = constant(1.0, shape=[2, 2]) diff --git a/test/TensorFlowNET.UnitTest/PythonTest.cs b/test/TensorFlowNET.UnitTest/PythonTest.cs index 50cc2b328..12fd72360 100644 --- a/test/TensorFlowNET.UnitTest/PythonTest.cs +++ b/test/TensorFlowNET.UnitTest/PythonTest.cs @@ -144,6 +144,37 @@ public void assertAllClose(double value, NDArray array2, double eps = 1e-5) Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); } + private class CollectionComparer : System.Collections.IComparer + { + private readonly double _epsilon; + + public CollectionComparer(double eps = 1e-06) { + _epsilon = eps; + } + public int Compare(object x, object y) + { + var a = (double)x; + var b = (double)y; + + double delta = Math.Abs(a - b); + if (delta < _epsilon) + { + return 0; + } + return a.CompareTo(b); + } + } + + public void assertAllCloseAccordingToType( + T[] expected, + T[] given, + double eps = 1e-6, + float float_eps = 1e-6f) + { + // TODO: check if any of arguments is not double and change toletance + CollectionAssert.AreEqual(expected, given, new CollectionComparer(eps)); + } + public void assertProtoEquals(object toProto, object o) { throw new NotImplementedException(); @@ -153,6 +184,20 @@ public void assertProtoEquals(object toProto, object o) #region tensor evaluation and test session + private Session _cached_session = null; + private 
Graph _cached_graph = null; + private object _cached_config = null; + private bool _cached_force_gpu = false; + + private void _ClearCachedSession() + { + if (self._cached_session != null) + { + self._cached_session.Dispose(); + self._cached_session = null; + } + } + //protected object _eval_helper(Tensor[] tensors) //{ // if (tensors == null) @@ -218,9 +263,56 @@ public T evaluate(Tensor tensor) } - public Session cached_session() + ///Returns a TensorFlow Session for use in executing tests. + public Session cached_session( + Graph graph = null, object config = null, bool use_gpu = false, bool force_gpu = false) { - throw new NotImplementedException(); + // This method behaves differently than self.session(): for performance reasons + // `cached_session` will by default reuse the same session within the same + // test.The session returned by this function will only be closed at the end + // of the test(in the TearDown function). + + // Use the `use_gpu` and `force_gpu` options to control where ops are run.If + // `force_gpu` is True, all ops are pinned to `/ device:GPU:0`. Otherwise, if + // `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as + // possible.If both `force_gpu and `use_gpu` are False, all ops are pinned to + // the CPU. + + // Example: + // python + // class MyOperatorTest(test_util.TensorFlowTestCase) : + // def testMyOperator(self): + // with self.cached_session() as sess: + // valid_input = [1.0, 2.0, 3.0, 4.0, 5.0] + // result = MyOperator(valid_input).eval() + // self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0] + // invalid_input = [-1.0, 2.0, 7.0] + // with self.assertRaisesOpError("negative input not supported"): + // MyOperator(invalid_input).eval() + + + // Args: + // graph: Optional graph to use during the returned session. + // config: An optional config_pb2.ConfigProto to use to configure the + // session. + // use_gpu: If True, attempt to run as many ops as possible on GPU. + // force_gpu: If True, pin all ops to `/device:GPU:0`. + + // Yields: + // A Session object that should be used as a context manager to surround + // the graph building and execution code in a test case. + + + // TODO: + // if context.executing_eagerly(): + // return self._eval_helper(tensors) + // else: + { + var sess = self._get_cached_session( + graph, config, force_gpu, crash_if_inconsistent_args: true); + using var cached = self._constrain_devices_and_set_default(sess, use_gpu, force_gpu); + return cached; + } } //Returns a TensorFlow Session for use in executing tests. @@ -268,6 +360,40 @@ public Session session(Graph graph = null, object config = null, bool use_gpu = return s.as_default(); } + private Session _constrain_devices_and_set_default(Session sess, bool use_gpu, bool force_gpu) + { + // Set the session and its graph to global default and constrain devices.""" + if (tf.executing_eagerly()) + return null; + else + { + sess.graph.as_default(); + sess.as_default(); + { + if (force_gpu) + { + // TODO: + + // Use the name of an actual device if one is detected, or + // '/device:GPU:0' otherwise + /* var gpu_name = gpu_device_name(); + if (!gpu_name) + gpu_name = "/device:GPU:0" + using (sess.graph.device(gpu_name)) { + yield return sess; + }*/ + return sess; + } + else if (use_gpu) + return sess; + else + using (sess.graph.device("/device:CPU:0")) + return sess; + } + + } + } + // See session() for details. 
private Session _create_session(Graph graph, object cfg, bool forceGpu) { @@ -312,6 +438,54 @@ private Session _create_session(Graph graph, object cfg, bool forceGpu) return new Session(graph);//, config = prepare_config(config)) } + private Session _get_cached_session( + Graph graph = null, + object config = null, + bool force_gpu = false, + bool crash_if_inconsistent_args = true) + { + // See cached_session() for documentation. + if (self._cached_session == null) + { + var sess = self._create_session(graph, config, force_gpu); + self._cached_session = sess; + self._cached_graph = graph; + self._cached_config = config; + self._cached_force_gpu = force_gpu; + return sess; + } + else + { + + if (crash_if_inconsistent_args && !self._cached_graph.Equals(graph)) + throw new ValueError(@"The graph used to get the cached session is + different than the one that was used to create the + session. Maybe create a new session with + self.session()"); + if (crash_if_inconsistent_args && !self._cached_config.Equals(config)) + { + throw new ValueError(@"The config used to get the cached session is + different than the one that was used to create the + session. Maybe create a new session with + self.session()"); + } + if (crash_if_inconsistent_args && !self._cached_force_gpu.Equals(force_gpu)) + { + throw new ValueError(@"The force_gpu value used to get the cached session is + different than the one that was used to create the + session. Maybe create a new session with + self.session()"); + } + return _cached_session; + } + } + + [TestCleanup] + public void Cleanup() + { + _ClearCachedSession(); + } + #endregion public void AssetSequenceEqual(T[] a, T[] b) diff --git a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs new file mode 100644 index 000000000..977544ae9 --- /dev/null +++ b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs @@ -0,0 +1,68 @@ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.Linq; +using System.Runtime.Intrinsics.X86; +using System.Security.AccessControl; +using Tensorflow.NumPy; +using TensorFlowNET.UnitTest; +using static Tensorflow.Binding; + +namespace Tensorflow.Keras.UnitTest.Optimizers +{ + [TestClass] + public class GradientDescentOptimizerTest : PythonTest + { + private void TestBasicGeneric() where T : struct + { + var dtype = Type.GetTypeCode(typeof(T)) switch + { + TypeCode.Single => np.float32, + TypeCode.Double => np.float64, + _ => throw new NotImplementedException(), + }; + + // train.GradientDescentOptimizer is V1 only API. 
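+            // Graph mode is assumed to be needed here: the V1 optimizer builds an
+            // update op (sgd_op) that is run through the cached session below rather
+            // than being executed eagerly.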
+ tf.Graph().as_default(); + using (self.cached_session()) + { + var var0 = tf.Variable(new[] { 1.0, 2.0 }, dtype: dtype); + var var1 = tf.Variable(new[] { 3.0, 4.0 }, dtype: dtype); + var grads0 = tf.constant(new[] { 0.1, 0.1 }, dtype: dtype); + var grads1 = tf.constant(new[] { 0.01, 0.01 }, dtype: dtype); + var optimizer = tf.train.GradientDescentOptimizer(3.0f); + var grads_and_vars = new[] { + Tuple.Create(grads0, var0 as IVariableV1), + Tuple.Create(grads1, var1 as IVariableV1) + }; + var sgd_op = optimizer.apply_gradients(grads_and_vars); + + var global_variables = variables.global_variables_initializer(); + self.evaluate(global_variables); + // Fetch params to validate initial values + // TODO: use self.evaluate instead of self.evaluate + self.assertAllCloseAccordingToType(new double[] { 1.0, 2.0 }, self.evaluate(var0)); + self.assertAllCloseAccordingToType(new double[] { 3.0, 4.0 }, self.evaluate(var1)); + // Run 1 step of sgd + sgd_op.run(); + // Validate updated params + self.assertAllCloseAccordingToType( + new double[] { 1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1 }, + self.evaluate(var0)); + self.assertAllCloseAccordingToType( + new double[] { 3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01 }, + self.evaluate(var1)); + // TODO: self.assertEqual(0, len(optimizer.variables())); + } + } + + [TestMethod] + public void TestBasic() + { + //TODO: add np.half + TestBasicGeneric(); + TestBasicGeneric(); + } + + + } +} From 02bfb9af176c13e8c37fe42ce600f4600ab8938d Mon Sep 17 00:00:00 2001 From: Beacontownfc <19636977267@qq.com> Date: Thu, 28 Sep 2023 15:22:13 +0000 Subject: [PATCH 132/182] improve raggedtensor --- .../Operations/array_ops.cs | 13 +++++ .../Tensors/Ragged/RaggedTensor.cs | 33 +++++++++++ .../Tensors/Ragged/RowPartition.cs | 55 +++++++++++++++++++ .../ManagedAPI/RaggedTensorTest.cs | 26 +++++++++ 4 files changed, 127 insertions(+) create mode 100644 test/TensorFlowNET.UnitTest/ManagedAPI/RaggedTensorTest.cs diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index f80dcd2c4..fdc53cd7e 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -1139,5 +1139,18 @@ public static Tensor placeholder(TF_DataType dtype, Shape shape = null, string n var _op = tf.OpDefLib._apply_op_helper("Placeholder", name: name, args: new { dtype, shape }); return _op.output; } + + public static int get_positive_axis(int axis, int ndims=-100, string axis_name="axis", string ndims_name= "ndims") + { + if(ndims != -100) + { + if (axis >= 0 && axis < ndims) return axis; + else if (-ndims <= axis && axis < 0) return axis + ndims; + else throw new ValueError($"{axis_name}={axis} out of bounds:expected {-ndims}<={axis_name}<{ndims}"); + + } else if(axis < 0) throw new ValueError($"{axis_name}={axis} may only be negative if {ndims_name} is statically known."); + return axis; + } + } } diff --git a/src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs b/src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs index 4f85e1081..0f09d4128 100644 --- a/src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs +++ b/src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs @@ -163,5 +163,38 @@ public static implicit operator RaggedTensor(Tensor tensor) { return tensor.Tag as RaggedTensor; } + public Tensor nrows(TF_DataType out_type, string name = null) + { + tf_with(ops.name_scope(name, "RaggedNRows"), scope => + { + return math_ops.cast(this._row_partition.nrows(), dtype: out_type); + }); + return null; + } + public 
RaggedTensor row_lengths(int axis=-1, string name=null) + { + if (axis == 0) return this._row_partition.nrows(); + if (axis == 1) return this._row_partition.row_lengths(); + var values = (RaggedTensor)this._values; + axis = array_ops.get_positive_axis( + axis, this.shape.rank, ndims_name: "rank(this)"); + if (axis == 0) return this.nrows(this._row_partition.GetDataType()); + else if (axis == 1) + { + var splits = this._row_partition.row_splits; + return splits[new Slice(start: 1)] - splits[new Slice(stop: -1)]; + + } + else if (this._values is RaggedTensor) + { + return values.row_lengths(axis - 1); + } + else + { + var shape = array_ops.shape(values, out_type: this._row_partition.GetDataType()); + return array_ops.ones(shape[new Slice(stop:axis - 1)], this._row_partition.GetDataType()) * + shape[axis - 1]; + } + } } } diff --git a/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs b/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs index 29dc525df..9e242ff38 100644 --- a/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs +++ b/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs @@ -14,10 +14,15 @@ You may obtain a copy of the License at limitations under the License. ******************************************************************************/ +using Serilog.Debugging; using System; +using System.Collections.Concurrent; using System.Collections.Generic; +//using System.ComponentModel.DataAnnotations; using System.Text; +using System.Xml.Linq; using Tensorflow.Framework; +using Tensorflow.NumPy; using static Tensorflow.Binding; namespace Tensorflow @@ -99,5 +104,55 @@ public static RowPartition from_row_splits(Tensor row_splits, return new RowPartition(row_splits); }); } + + public static RowPartition from_row_lengths(Tensor row_lengths, + bool validate=true, + TF_DataType dtype = TF_DataType.TF_INT32, + TF_DataType dtype_hint= TF_DataType.TF_INT32) + { + row_lengths = _convert_row_partition( + row_lengths, "row_lengths", dtype_hint: dtype_hint, dtype: dtype); + Tensor row_limits = math_ops.cumsum(row_lengths, tf.constant(-1)); + Tensor row_splits = array_ops.concat(new Tensor[] { tf.convert_to_tensor(np.array(new int[] { 0 }, TF_DataType.TF_INT64)), row_limits }, axis:0); + return new RowPartition(row_splits: row_splits, row_lengths: row_lengths); + } + + public static Tensor _convert_row_partition(Tensor partition, string name, TF_DataType dtype, + TF_DataType dtype_hint= TF_DataType.TF_INT64) + { + if (partition is NDArray && partition.GetDataType() == np.int32) partition = ops.convert_to_tensor(partition, name: name); + if (partition.GetDataType() != np.int32 && partition.GetDataType() != np.int64) throw new ValueError($"{name} must have dtype int32 or int64"); + return partition; + } + + public Tensor nrows() + { + /*Returns the number of rows created by this `RowPartition*/ + if (this._nrows != null) return this._nrows; + var nsplits = tensor_shape.dimension_at_index(this._row_splits.shape, 0); + if (nsplits == null) return array_ops.shape(this._row_splits, out_type: this.row_splits.dtype)[0] - 1; + else return constant_op.constant(nsplits.value - 1, dtype: this.row_splits.dtype); + } + + public Tensor row_lengths() + { + + if (this._row_splits != null) + { + int nrows_plus_one = tensor_shape.dimension_value(this._row_splits.shape[0]); + return tf.constant(nrows_plus_one - 1); + + } + if (this._row_lengths != null) + { + var nrows = tensor_shape.dimension_value(this._row_lengths.shape[0]); + return tf.constant(nrows); + } + if(this._nrows != null) + { + return 
tensor_util.constant_value(this._nrows); + } + return tf.constant(-1); + } } } diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/RaggedTensorTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/RaggedTensorTest.cs new file mode 100644 index 000000000..7a3de882e --- /dev/null +++ b/test/TensorFlowNET.UnitTest/ManagedAPI/RaggedTensorTest.cs @@ -0,0 +1,26 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Tensorflow; +using Tensorflow.NumPy; +using static Tensorflow.Binding; + +namespace TensorFlowNET.UnitTest.ManagedAPI +{ + public class RaggedTensorTest :EagerModeTestBase + { + [TestMethod] + public void Test_from_row_lengths() + { + var row_lengths = tf.convert_to_tensor(np.array(new int[] { 2, 0, 3, 1, 1 }, TF_DataType.TF_INT64)); + var rp = RowPartition.from_row_lengths(row_lengths, validate: false); + var rp_row_lengths = rp.row_lengths(); + var rp_nrows = rp.nrows(); + Assert.IsTrue(rp_nrows.ToArray()[0] == rp.nrows().ToArray()[0]); + + } + } +} From f5af07ce5efc938686c897db57f0a33ec371adec Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Mon, 2 Oct 2023 00:23:56 +0800 Subject: [PATCH 133/182] feat: add the implementation of sample_weight in model.fit --- .../Keras/ArgsDefinition/DataAdapterArgs.cs | 3 + .../Keras/ArgsDefinition/DataHandlerArgs.cs | 3 + src/TensorFlowNET.Core/Keras/Engine/IModel.cs | 11 +- src/TensorFlowNET.Core/Util/Data.cs | 66 +++++++++ .../Engine/DataAdapters/DataAdapter.cs | 59 ++++++++ .../Engine/DataAdapters/DataHandler.cs | 3 + .../Engine/DataAdapters/IDataAdapter.cs | 2 + .../DataAdapters/TensorLikeDataAdapter.cs | 7 +- .../Engine/LossesContainer.cs | 4 +- .../Engine/Model.Evaluate.cs | 19 ++- src/TensorFlowNET.Keras/Engine/Model.Fit.cs | 129 ++++++------------ src/TensorFlowNET.Keras/Engine/Model.Train.cs | 40 +++++- .../Layers/Rnn.Test.cs | 4 +- 13 files changed, 250 insertions(+), 100 deletions(-) create mode 100644 src/TensorFlowNET.Core/Util/Data.cs diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataAdapterArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataAdapterArgs.cs index 78882e82d..ba0332836 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataAdapterArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataAdapterArgs.cs @@ -1,5 +1,6 @@ using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; +using Tensorflow.NumPy; namespace Tensorflow.Keras.ArgsDefinition { @@ -16,5 +17,7 @@ public class DataAdapterArgs: IKerasConfig public int Worker { get; set; } public bool UseMultiprocessing { get; set; } public IModel Model { get; set; } + public Dictionary ClassWeight = null; + public NDArray SampleWeight = null; } } diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataHandlerArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataHandlerArgs.cs index 82530e950..72d0bb811 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataHandlerArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataHandlerArgs.cs @@ -1,5 +1,6 @@ using Tensorflow.Keras.Engine; using Tensorflow.Keras.Saving; +using Tensorflow.NumPy; namespace Tensorflow.Keras.ArgsDefinition { @@ -18,5 +19,7 @@ public class DataHandlerArgs: IKerasConfig public bool UseMultiprocessing { get; set; } = false; public IModel Model { get; set; } public IVariableV1 StepsPerExecution { get; set; } + public Dictionary ClassWeight = null; + public NDArray SampleWeight = null; } } diff --git 
a/src/TensorFlowNET.Core/Keras/Engine/IModel.cs b/src/TensorFlowNET.Core/Keras/Engine/IModel.cs index 19f3df9ba..1840f88b9 100644 --- a/src/TensorFlowNET.Core/Keras/Engine/IModel.cs +++ b/src/TensorFlowNET.Core/Keras/Engine/IModel.cs @@ -3,6 +3,7 @@ using Tensorflow.Keras.Metrics; using Tensorflow.Keras.Saving; using Tensorflow.NumPy; +using Tensorflow.Util; namespace Tensorflow.Keras.Engine; @@ -22,8 +23,10 @@ ICallback fit(NDArray x, NDArray y, int verbose = 1, List callbacks = null, float validation_split = 0f, - (NDArray val_x, NDArray val_y)? validation_data = null, + ValidationDataPack validation_data = null, bool shuffle = true, + Dictionary class_weight = null, + NDArray sample_weight = null, int initial_epoch = 0, int max_queue_size = 10, int workers = 1, @@ -35,8 +38,10 @@ ICallback fit(IEnumerable x, NDArray y, int verbose = 1, List callbacks = null, float validation_split = 0f, - (IEnumerable val_x, NDArray val_y)? validation_data = null, + ValidationDataPack validation_data = null, bool shuffle = true, + Dictionary class_weight = null, + NDArray sample_weight = null, int initial_epoch = 0, int max_queue_size = 10, int workers = 1, @@ -63,6 +68,8 @@ void load_weights(string filepath, Dictionary evaluate(NDArray x, NDArray y, int batch_size = -1, int verbose = 1, + NDArray sample_weight = null, + int steps = -1, int max_queue_size = 10, int workers = 1, diff --git a/src/TensorFlowNET.Core/Util/Data.cs b/src/TensorFlowNET.Core/Util/Data.cs new file mode 100644 index 000000000..a14c69b18 --- /dev/null +++ b/src/TensorFlowNET.Core/Util/Data.cs @@ -0,0 +1,66 @@ +using Tensorflow.NumPy; + +namespace Tensorflow.Util +{ + /// + /// ValidationDataPack is used to pass validation data to fit method. + /// It can recive data which could be A tuple `(x_val, xy_val)` or `(x_val, y_val, sample_weight_val)` of Numpy arrays. 
+ /// + public class ValidationDataPack + { + public NDArray val_x; + public NDArray val_y; + public NDArray val_sample_weight = null; + + public ValidationDataPack((NDArray, NDArray) validation_data) + { + this.val_x = validation_data.Item1; + this.val_y = validation_data.Item2; + } + + public ValidationDataPack((NDArray, NDArray, NDArray) validation_data) + { + this.val_x = validation_data.Item1; + this.val_y = validation_data.Item2; + this.val_sample_weight = validation_data.Item3; + } + + public ValidationDataPack((IEnumerable, NDArray) validation_data) + { + this.val_x = validation_data.Item1.ToArray()[0]; + this.val_y = validation_data.Item2; + } + + public ValidationDataPack((IEnumerable, NDArray, NDArray) validation_data) + { + this.val_x = validation_data.Item1.ToArray()[0]; + this.val_y = validation_data.Item2; + this.val_sample_weight = validation_data.Item3; + } + + public static implicit operator ValidationDataPack((NDArray, NDArray) validation_data) + => new ValidationDataPack(validation_data); + + public static implicit operator ValidationDataPack((NDArray, NDArray, NDArray) validation_data) + => new ValidationDataPack(validation_data); + + public static implicit operator ValidationDataPack((IEnumerable, NDArray) validation_data) + => new ValidationDataPack(validation_data); + + public static implicit operator ValidationDataPack((IEnumerable, NDArray, NDArray) validation_data) + => new ValidationDataPack(validation_data); + + public void Deconstruct(out NDArray val_x, out NDArray val_y) + { + val_x = this.val_x; + val_y = this.val_y; + } + + public void Deconstruct(out NDArray val_x, out NDArray val_y, out NDArray val_sample_weight) + { + val_x = this.val_x; + val_y = this.val_y; + val_sample_weight = this.val_sample_weight; + } + } +} diff --git a/src/TensorFlowNET.Keras/Engine/DataAdapters/DataAdapter.cs b/src/TensorFlowNET.Keras/Engine/DataAdapters/DataAdapter.cs index 6c7d53b2f..b2750496a 100644 --- a/src/TensorFlowNET.Keras/Engine/DataAdapters/DataAdapter.cs +++ b/src/TensorFlowNET.Keras/Engine/DataAdapters/DataAdapter.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Text; using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Util; namespace Tensorflow.Keras.Engine.DataAdapters { @@ -34,9 +35,67 @@ public virtual (Tensors, Tensors) Expand1d(Tensors x, Tensors y) return (x, y); } + public virtual (Tensors, Tensors, Tensors) Expand1d(Tensors x, Tensors y, Tensors sample_weight) + { + for (int i = 0; i < x.Length; i++) + { + if (x[i].shape.ndim == 1) + x[i] = array_ops.expand_dims(x[i], axis: -1); + } + for (int i = 0; i < y.Length; i++) + { + if (y[i].shape.ndim == 1) + y[i] = array_ops.expand_dims(y[i], axis: -1); + } + for (int i = 0; i < sample_weight.Length; i++) + { + if (sample_weight[i].shape.ndim == 1) + sample_weight[i] = array_ops.expand_dims(sample_weight[i], axis: -1); + } + return (x, y, sample_weight); + } + public virtual bool ShouldRecreateIterator() { return true; } + + public static ((NDArray, NDArray, NDArray),ValidationDataPack) train_validation_split((NDArray, NDArray, NDArray) x_y_sample_weight, float validation_split) + { + var x = x_y_sample_weight.Item1; + var y = x_y_sample_weight.Item2; + var sample_weight = x_y_sample_weight.Item3; + int train_count = Convert.ToInt32(x.dims[0] * (1 - validation_split)); + var train_x = x[new Slice(0, train_count)]; + var train_y = y[new Slice(0, train_count)]; + ValidationDataPack validation_data; + if (sample_weight != null) + { + validation_data = (x[new Slice(train_count)], y[new 
Slice(train_count)], sample_weight[new Slice(train_count)]); + sample_weight = sample_weight[new Slice(0, train_count)]; + } + else + { + validation_data = (x[new Slice(train_count)], y[new Slice(train_count)]); + } + + return ((train_x, train_y, sample_weight), validation_data); + } + + public static ((IEnumerable, NDArray, NDArray), ValidationDataPack) train_validation_split((IEnumerable, NDArray, NDArray) x_y_sample_weight, float validation_split) + { + var x = x_y_sample_weight.Item1; + var y = x_y_sample_weight.Item2; + var sample_weight = x_y_sample_weight.Item3; + int train_count = Convert.ToInt32(y.dims[0] * (1 - validation_split)); + var train_x = x.Select(x => x[new Slice(0, train_count)] as NDArray); + var train_y = y[new Slice(0, train_count)]; + var val_x = x.Select(x => x[new Slice(train_count)] as NDArray); + var val_y = y[new Slice(train_count)]; + NDArray tmp_sample_weight = sample_weight; + sample_weight = sample_weight[new Slice(0, train_count)]; + ValidationDataPack validation_data = (val_x, val_y, tmp_sample_weight[new Slice(train_count)]); + return ((train_x, train_y, sample_weight), validation_data); + } } } diff --git a/src/TensorFlowNET.Keras/Engine/DataAdapters/DataHandler.cs b/src/TensorFlowNET.Keras/Engine/DataAdapters/DataHandler.cs index 4723222f2..a5ee75c93 100644 --- a/src/TensorFlowNET.Keras/Engine/DataAdapters/DataHandler.cs +++ b/src/TensorFlowNET.Keras/Engine/DataAdapters/DataHandler.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using Tensorflow.Keras.ArgsDefinition; using static Tensorflow.Binding; +using Tensorflow.Keras.Utils; namespace Tensorflow.Keras.Engine.DataAdapters { @@ -28,6 +29,7 @@ public class DataHandler public DataHandler(DataHandlerArgs args) { this.args = args; + if (args.StepsPerExecution == null) { _steps_per_execution = tf.Variable(1L); @@ -48,6 +50,7 @@ public DataHandler(DataHandlerArgs args) BatchSize = args.BatchSize, Steps = args.StepsPerEpoch, Epochs = args.Epochs - args.InitialEpoch, + SampleWeight = args.SampleWeight, Shuffle = args.Shuffle, MaxQueueSize = args.MaxQueueSize, Worker = args.Workers, diff --git a/src/TensorFlowNET.Keras/Engine/DataAdapters/IDataAdapter.cs b/src/TensorFlowNET.Keras/Engine/DataAdapters/IDataAdapter.cs index 4bdc49795..bb71b0a2d 100644 --- a/src/TensorFlowNET.Keras/Engine/DataAdapters/IDataAdapter.cs +++ b/src/TensorFlowNET.Keras/Engine/DataAdapters/IDataAdapter.cs @@ -17,6 +17,8 @@ public interface IDataAdapter IDatasetV2 GetDataset(); int GetSize(); (Tensors, Tensors) Expand1d(Tensors x, Tensors y); + (Tensors, Tensors, Tensors) Expand1d(Tensors x, Tensors y, Tensors sample_weight); + bool ShouldRecreateIterator(); } } diff --git a/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs b/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs index 16e646a35..978a3f51c 100644 --- a/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs +++ b/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs @@ -20,7 +20,7 @@ public class TensorLikeDataAdapter : DataAdapter, IDataAdapter public TensorLikeDataAdapter(DataAdapterArgs args) { this.args = args; - _process_tensorlike(); + Tensor sample_weight_tensor = args.SampleWeight != null ? _process_tensorlike(args.SampleWeight) : null; num_samples = (int)args.X.shape[0]; var batch_size = args.BatchSize == -1 ? 
32 : args.BatchSize; _batch_size = batch_size; @@ -37,6 +37,8 @@ public TensorLikeDataAdapter(DataAdapterArgs args) inputs.AddRange(args.X); if (args.Y != null) inputs.AddRange(args.Y); + if (sample_weight_tensor != null) + inputs.Add(sample_weight_tensor); dataset = slice_inputs(indices_dataset, inputs); dataset.FirstInputTensorCount = args.X.Length; } @@ -94,8 +96,9 @@ IDatasetV2 slice_inputs(IDatasetV2 indices_dataset, Tensors elements) public override bool ShouldRecreateIterator() => false; - void _process_tensorlike() + Tensor _process_tensorlike(NDArray sample_weights) { + return tf.convert_to_tensor(sample_weights); } } } diff --git a/src/TensorFlowNET.Keras/Engine/LossesContainer.cs b/src/TensorFlowNET.Keras/Engine/LossesContainer.cs index 6a91450de..c06fca593 100644 --- a/src/TensorFlowNET.Keras/Engine/LossesContainer.cs +++ b/src/TensorFlowNET.Keras/Engine/LossesContainer.cs @@ -26,11 +26,11 @@ public LossesContainer(ILossFunc losses, string[] output_names = null) /// /// /// - public Tensor Call(Tensor y_true, Tensor y_pred) + public Tensor Call(Tensor y_true, Tensor y_pred, Tensor sample_weight = null) { if (!_built) Build(y_pred); - var loss_value = _losses.Call(y_true, y_pred); + var loss_value = _losses.Call(y_true, y_pred, sample_weight:sample_weight); var loss_metric_value = loss_value; var batch_dim = array_ops.shape(y_true)[0]; diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index a74a77f18..626d7fcad 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -30,6 +30,7 @@ public partial class Model public Dictionary evaluate(NDArray x, NDArray y, int batch_size = -1, int verbose = 1, + NDArray sample_weight = null, int steps = -1, int max_queue_size = 10, int workers = 1, @@ -51,6 +52,7 @@ public Dictionary evaluate(NDArray x, NDArray y, StepsPerEpoch = steps, InitialEpoch = 0, Epochs = 1, + SampleWeight = sample_weight, MaxQueueSize = max_queue_size, Workers = workers, UseMultiprocessing = use_multiprocessing, @@ -140,7 +142,8 @@ Dictionary evaluate(DataHandler data_handler, CallbackList callba Dictionary test_function(DataHandler data_handler, OwnedIterator iterator) { var data = iterator.next(); - var outputs = test_step(data_handler, data[0], data[1]); + var outputs = data.Length == 2 ? test_step(data_handler, data[0], data[1]) : + test_step(data_handler, data[0], data[1], data[2]); tf_with(ops.control_dependencies(new object[0]), ctl => _test_counter.assign_add(1)); return outputs; } @@ -149,17 +152,23 @@ Dictionary test_step_multi_inputs_function(DataHandler data_handl { var data = iterator.next(); var x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount; - var outputs = test_step(data_handler, data.Take(x_size).ToArray(), data.Skip(x_size).ToArray()); + var outputs = data.Length == 2 ? 
+ test_step(data_handler, new Tensors(data.Take(x_size).ToArray()), new Tensors(data.Skip(x_size).ToArray())) : + test_step( + data_handler, + new Tensors(data.Take(x_size).ToArray()), + new Tensors(data.Skip(x_size).Take(x_size).ToArray()), + new Tensors(data.Skip(2 * x_size).ToArray())); tf_with(ops.control_dependencies(new object[0]), ctl => _test_counter.assign_add(1)); return outputs; } - Dictionary test_step(DataHandler data_handler, Tensors x, Tensors y) + Dictionary test_step(DataHandler data_handler, Tensors x, Tensors y, Tensors sample_weight = null) { - (x, y) = data_handler.DataAdapter.Expand1d(x, y); + (x, y, sample_weight) = data_handler.DataAdapter.Expand1d(x, y, sample_weight); var y_pred = Apply(x, training: false); - var loss = compiled_loss.Call(y, y_pred); + var loss = compiled_loss.Call(y, y_pred, sample_weight:sample_weight); compiled_metrics.update_state(y, y_pred); return metrics.Select(x => (x.Name, x.result())).ToDictionary(x => x.Item1, x => (float)x.Item2); } diff --git a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs index d6f89d8be..23c53b707 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs @@ -6,10 +6,12 @@ using Tensorflow.Keras.Engine.DataAdapters; using System.Diagnostics; using Tensorflow.Keras.Callbacks; -using System.Data; +using Tensorflow.Util; namespace Tensorflow.Keras.Engine { + + public partial class Model { /// @@ -19,19 +21,29 @@ public partial class Model /// /// /// - /// /// + /// /// /// /// + /// + /// + /// + /// + /// + /// + /// + /// public ICallback fit(NDArray x, NDArray y, int batch_size = -1, int epochs = 1, int verbose = 1, List callbacks = null, float validation_split = 0f, - (NDArray val_x, NDArray val_y)? validation_data = null, + ValidationDataPack validation_data = null, bool shuffle = true, + Dictionary class_weight = null, + NDArray sample_weight = null, int initial_epoch = 0, int max_queue_size = 10, int workers = 1, @@ -43,21 +55,25 @@ public ICallback fit(NDArray x, NDArray y, $"The array x and y should have same value at dim 0, but got {x.dims[0]} and {y.dims[0]}"); } - var train_x = x; - var train_y = y; + // The default dtype in NDArray is double, so we need to cast sample_weight to float to mul with loss which's dtype is float. + sample_weight = sample_weight?.astype(TF_DataType.TF_FLOAT); if (validation_split != 0f && validation_data == null) { - int train_count = Convert.ToInt32(x.dims[0] * (1 - validation_split)); - train_x = x[new Slice(0, train_count)]; - train_y = y[new Slice(0, train_count)]; - validation_data = (val_x: x[new Slice(train_count)], val_y: y[new Slice(train_count)]); + ((x, y, sample_weight), validation_data) = DataAdapter.train_validation_split((x, y, sample_weight), validation_split); + } + + // TODO(Wanglongzhi2001) + if (class_weight != null) + { + throw new NotImplementedException("class_weight is not implemented"); } var data_handler = new DataHandler(new DataHandlerArgs { - X = train_x, - Y = train_y, + X = x, + Y = y, + SampleWeight = sample_weight, BatchSize = batch_size, InitialEpoch = initial_epoch, Epochs = epochs, @@ -73,14 +89,17 @@ public ICallback fit(NDArray x, NDArray y, train_step_func: train_step_function); } + public ICallback fit(IEnumerable x, NDArray y, int batch_size = -1, int epochs = 1, int verbose = 1, List callbacks = null, float validation_split = 0f, - (IEnumerable val_x, NDArray val_y)? 
validation_data = null, + ValidationDataPack validation_data = null, bool shuffle = true, + Dictionary class_weight = null, + NDArray sample_weight = null, int initial_epoch = 0, int max_queue_size = 10, int workers = 1, @@ -95,27 +114,23 @@ public ICallback fit(IEnumerable x, NDArray y, } } - var train_x = x; - var train_y = y; + sample_weight = sample_weight?.astype(TF_DataType.TF_FLOAT); + if (validation_split != 0f && validation_data == null) { - int train_count = Convert.ToInt32(y.dims[0] * (1 - validation_split)); - train_x = x.Select(x => x[new Slice(0, train_count)] as NDArray); - train_y = y[new Slice(0, train_count)]; - var val_x = x.Select(x => x[new Slice(train_count)] as NDArray); - var val_y = y[new Slice(train_count)]; - validation_data = (val_x, val_y); + ((x, y, sample_weight), validation_data) = DataAdapter.train_validation_split((x, y, sample_weight), validation_split); } var data_handler = new DataHandler(new DataHandlerArgs { - X = new Tensors(train_x.ToArray()), - Y = train_y, + X = new Tensors(x.ToArray()), + Y = y, BatchSize = batch_size, InitialEpoch = initial_epoch, Epochs = epochs, Shuffle = shuffle, + SampleWeight = sample_weight, MaxQueueSize = max_queue_size, Workers = workers, UseMultiprocessing = use_multiprocessing, @@ -142,8 +157,10 @@ public History fit(IDatasetV2 dataset, int verbose = 1, List callbacks = null, IDatasetV2 validation_data = null, - int validation_step = 10, // 间隔多少次会进行一次验证 + int validation_step = 10, bool shuffle = true, + Dictionary class_weight = null, + NDArray sample_weight = null, int initial_epoch = 0, int max_queue_size = 10, int workers = 1, @@ -210,7 +227,7 @@ History FitInternal(DataHandler data_handler, int epochs, int validation_step, i { if (validation_step > 0 && epoch ==0 || (epoch) % validation_step != 0) continue; - + var val_logs = evaluate(validation_data); foreach(var log in val_logs) { @@ -233,7 +250,7 @@ History FitInternal(DataHandler data_handler, int epochs, int validation_step, i return callbacks.History; } - History FitInternal(DataHandler data_handler, int epochs, int verbose, List callbackList, (NDArray, NDArray)? validation_data, + History FitInternal(DataHandler data_handler, int epochs, int verbose, List callbackList, ValidationDataPack validation_data, Func> train_step_func) { stop_training = false; @@ -274,7 +291,8 @@ History FitInternal(DataHandler data_handler, int epochs, int verbose, List callbackList, (IEnumerable, NDArray)? 
validation_data, - Func> train_step_func) - { - stop_training = false; - _train_counter.assign(0); - var callbacks = new CallbackList(new CallbackParams - { - Model = this, - Verbose = verbose, - Epochs = epochs, - Steps = data_handler.Inferredsteps - }); - - if (callbackList != null) - { - foreach (var callback in callbackList) - callbacks.callbacks.add(callback); - } - - callbacks.on_train_begin(); - - foreach (var (epoch, iterator) in data_handler.enumerate_epochs()) - { - reset_metrics(); - callbacks.on_epoch_begin(epoch); - // data_handler.catch_stop_iteration(); - var logs = new Dictionary(); - long End_step = 0; - foreach (var step in data_handler.steps()) - { - callbacks.on_train_batch_begin(step); - logs = train_step_func(data_handler, iterator); - var end_step = step + data_handler.StepIncrement; - End_step = end_step; - callbacks.on_train_batch_end(end_step, logs); - } - - if (validation_data != null) - { - var val_logs = evaluate(validation_data.Value.Item1, validation_data.Value.Item2); - foreach (var log in val_logs) - { - logs["val_" + log.Key] = log.Value; - callbacks.on_train_batch_end(End_step, logs); - } - } - - callbacks.on_epoch_end(epoch, logs); - - GC.Collect(); - GC.WaitForPendingFinalizers(); - if (stop_training) - { - break; - } - } - - return callbacks.History; - } } } diff --git a/src/TensorFlowNET.Keras/Engine/Model.Train.cs b/src/TensorFlowNET.Keras/Engine/Model.Train.cs index ad3c70d2d..8f1ec808c 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Train.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Train.cs @@ -12,7 +12,9 @@ public partial class Model Dictionary train_step_function(DataHandler data_handler, OwnedIterator iterator) { var data = iterator.next(); - var outputs = train_step(data_handler, data[0], data[1]); + // whether have sample_weight + var outputs = data.Length == 2 ? train_step(data_handler, data[0], data[1]) : + train_step(data_handler, data[0], data[1], data[2]); tf_with(ops.control_dependencies(new object[0]), ctl => _train_counter.assign_add(1)); return outputs; } @@ -21,7 +23,13 @@ Dictionary train_step_multi_inputs_function(DataHandler data_hand { var data = iterator.next(); var x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount; - var outputs = train_step(data_handler, new Tensors(data.Take(x_size).ToArray()), new Tensors(data.Skip(x_size).ToArray())); + var outputs = data.Length == 2 ? 
+ train_step(data_handler, new Tensors(data.Take(x_size).ToArray()), new Tensors(data.Skip(x_size).ToArray())) : + train_step( + data_handler, + new Tensors(data.Take(x_size).ToArray()), + new Tensors(data.Skip(x_size).Take(x_size).ToArray()), + new Tensors(data.Skip(2 * x_size).ToArray())); tf_with(ops.control_dependencies(new object[0]), ctl => _train_counter.assign_add(1)); return outputs; } @@ -61,6 +69,34 @@ Dictionary train_step(DataHandler data_handler, Tensors x, Tensor }); return dict; } + Dictionary train_step(DataHandler data_handler, Tensors x, Tensors y, Tensors sample_weight = null) + { + (x, y, sample_weight) = data_handler.DataAdapter.Expand1d(x, y, sample_weight); + using var tape = tf.GradientTape(); + var y_pred = Apply(x, training: true); + var loss = compiled_loss.Call(y, y_pred, sample_weight:sample_weight); + + // For custom training steps, users can just write: + // trainable_variables = self.trainable_variables + // gradients = tape.gradient(loss, trainable_variables) + // self.optimizer.apply_gradients(zip(gradients, trainable_variables)) + // The _minimize call does a few extra steps unnecessary in most cases, + // such as loss scaling and gradient clipping. + _minimize(tape, optimizer, loss, TrainableVariables); + compiled_metrics.update_state(y, y_pred); + + var dict = new Dictionary(); + metrics.ToList().ForEach(x => + { + var r = x.result(); + if (r.ndim > 0) + { + r = tf.reduce_mean(r); + } + dict[x.Name] = (float)r; + }); + return dict; + } void _minimize(GradientTape tape, IOptimizer optimizer, Tensor loss, List trainable_variables) { diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs index dbf5cae1e..67e2b0464 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs @@ -74,8 +74,8 @@ public void TrainLSTMWithMnist() OneHot = true, ValidationSize = 55000, }).Result; - - model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size: 16, epochs: 1); + var sample_weight = np.ones(((int)dataset.Train.Data.shape[0])); + model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size: 16, epochs: 1, sample_weight:sample_weight); } [TestMethod] From 0f02885dfb3647ae1b2bfae51491b4f119da4be9 Mon Sep 17 00:00:00 2001 From: hchen Date: Mon, 2 Oct 2023 18:57:17 -0500 Subject: [PATCH 134/182] Allow Model to cache weights. 
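
A minimal usage sketch of the caching behaviour introduced below (the model-building
helper is hypothetical; only load_weights and the per-path cache come from this patch):
the first load_weights call for a given file parses the HDF5 file and records the
(weight name, value) pairs, and any later call with the same path is expected to restore
them through keras.backend.batch_set_value without reopening the file.

    // Assumption: BuildModel() is a placeholder for constructing the same architecture twice.
    var modelA = BuildModel();
    modelA.load_weights("weights.h5");   // slow path: reads the HDF5 file, fills the per-path cache
    var modelB = BuildModel();
    modelB.load_weights("weights.h5");   // fast path: weights restored from the cache, no HDF5 access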
--- .../Engine/Model.Training.cs | 35 ++++++++++++++++++- src/TensorFlowNET.Keras/Saving/hdf5_format.cs | 4 +-- 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/src/TensorFlowNET.Keras/Engine/Model.Training.cs b/src/TensorFlowNET.Keras/Engine/Model.Training.cs index 50d934d9d..457b3d694 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Training.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Training.cs @@ -10,8 +10,38 @@ namespace Tensorflow.Keras.Engine { public partial class Model { + static Dictionary> weightsCache + = new Dictionary>(); + public void load_weights(string filepath, bool by_name = false, bool skip_mismatch = false, object options = null) { + // Get from cache + if (weightsCache.ContainsKey(filepath)) + { + var filtered_layers = new List(); + foreach (var layer in Layers) + { + var weights = hdf5_format._legacy_weights(layer); + if (weights.Count > 0) + filtered_layers.append(layer); + } + + var weight_value_tuples = new List<(IVariableV1, NDArray)>(); + filtered_layers.Select((layer, i) => + { + var symbolic_weights = hdf5_format._legacy_weights(layer); + foreach(var weight in symbolic_weights) + { + var weight_value = weightsCache[filepath].First(x => x.Item1 == weight.Name).Item2; + weight_value_tuples.Add((weight, weight_value)); + } + return layer; + }).ToList(); + + keras.backend.batch_set_value(weight_value_tuples); + return; + } + long fileId = Hdf5.OpenFile(filepath, true); if(fileId < 0) { @@ -29,8 +59,11 @@ public void load_weights(string filepath, bool by_name = false, bool skip_mismat throw new NotImplementedException(""); else { - hdf5_format.load_weights_from_hdf5_group(fileId, Layers); + var weight_value_tuples = hdf5_format.load_weights_from_hdf5_group(fileId, Layers); Hdf5.CloseFile(fileId); + + weightsCache[filepath] = weight_value_tuples.Select(x => (x.Item1.Name, x.Item2)).ToList(); + keras.backend.batch_set_value(weight_value_tuples); } } diff --git a/src/TensorFlowNET.Keras/Saving/hdf5_format.cs b/src/TensorFlowNET.Keras/Saving/hdf5_format.cs index bab0efecf..68b73953d 100644 --- a/src/TensorFlowNET.Keras/Saving/hdf5_format.cs +++ b/src/TensorFlowNET.Keras/Saving/hdf5_format.cs @@ -82,7 +82,7 @@ public static void load_optimizer_weights_from_hdf5_group(long filepath = -1, Di } - public static void load_weights_from_hdf5_group(long f, List layers) + public static List<(IVariableV1, NDArray)> load_weights_from_hdf5_group(long f, List layers) { string original_keras_version = "2.5.0"; string original_backend = null; @@ -152,7 +152,7 @@ public static void load_weights_from_hdf5_group(long f, List layers) weight_value_tuples.AddRange(zip(symbolic_weights, weight_values)); } - keras.backend.batch_set_value(weight_value_tuples); + return weight_value_tuples; } public static void toarrayf4(long filepath = -1, Dictionary custom_objects = null, bool compile = false) From a1c64effcfe7976b6cb0f3fbbd268cee203b4874 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Thu, 5 Oct 2023 20:49:22 +0800 Subject: [PATCH 135/182] feat: add the implementation of class_weight in model.fit --- .../Engine/DataAdapters/DataHandler.cs | 70 ++++++++++++++++++- .../Engine/Model.Evaluate.cs | 13 +++- src/TensorFlowNET.Keras/Engine/Model.Fit.cs | 11 ++- 3 files changed, 84 insertions(+), 10 deletions(-) diff --git a/src/TensorFlowNET.Keras/Engine/DataAdapters/DataHandler.cs b/src/TensorFlowNET.Keras/Engine/DataAdapters/DataHandler.cs index a5ee75c93..a305e5033 100644 --- a/src/TensorFlowNET.Keras/Engine/DataAdapters/DataHandler.cs +++ 
b/src/TensorFlowNET.Keras/Engine/DataAdapters/DataHandler.cs @@ -3,6 +3,8 @@ using Tensorflow.Keras.ArgsDefinition; using static Tensorflow.Binding; using Tensorflow.Keras.Utils; +using Tensorflow.Util; +using Tensorflow.Framework; namespace Tensorflow.Keras.Engine.DataAdapters { @@ -24,6 +26,7 @@ public class DataHandler long _steps_per_execution_value; int _initial_epoch => args.InitialEpoch; int _epochs => args.Epochs; + NDArray _sample_weight => args.SampleWeight; IVariableV1 _steps_per_execution; public DataHandler(DataHandlerArgs args) @@ -75,10 +78,75 @@ public DataHandler(DataHandlerArgs args) } _dataset = _adapter.GetDataset(); - _inferred_steps = _infer_steps(args.StepsPerEpoch, _dataset); _current_step = 0; _step_increment = _steps_per_execution_value - 1; _insufficient_data = false; + _configure_dataset_and_inferred_steps(args.X, args.ClassWeight); + } + + void _configure_dataset_and_inferred_steps(Tensors x, Dictionary class_weight) + { + if (_dataset == null) + { + _dataset = _adapter.GetDataset(); + _inferred_steps = _infer_steps(args.StepsPerEpoch, _dataset); + } + + if (class_weight != null) + { + _dataset = _dataset.map(_make_class_weight_map_fn(class_weight)); + } + _inferred_steps = _infer_steps(args.StepsPerEpoch, _dataset); + } + + + Func _make_class_weight_map_fn(Dictionary class_weight) + { + var class_ids = class_weight.Keys.OrderBy(key => key).ToList(); + var expected_class_ids = range(class_ids[0], class_ids[class_ids.Count - 1] + 1); + if (!class_ids.SequenceEqual(expected_class_ids)) + { + throw new ValueError("Expected `class_weight` to be a dict with keys from 0 to one less "+ + $"than the number of classes, found {class_weight}"); + } + + var class_weight_list = new List(); + foreach (var class_id in class_ids) + { + class_weight_list.Add(class_weight[class_id]); + } + var class_weight_tensor = tf.convert_to_tensor(class_weight_list.ToArray()); + + Func _class_weight_map_fn = (Tensors data) => + { + var x = data[0]; + var y = data[1]; + var sw = _sample_weight == null ? 
null : ops.convert_to_tensor(_sample_weight); + + if (y.shape.rank > 2) + { + throw new ValueError("`class_weight` not supported for 3+ dimensional targets."); + } + + var y_classes = smart_module.smart_cond( + y.shape.rank == 2 && y.shape[1] > 1, + () => math_ops.argmax(y, dimension: 1), + () => math_ops.cast(tf.reshape(y, (-1)), TF_DataType.TF_INT64)); + + var cw = array_ops.gather(class_weight_tensor, y_classes); + if (sw != null) + { + cw = tf.cast(cw, sw.dtype); + cw *= sw; + } + else + { + sw = cw; + } + return new Tensors { x, y, sw }; + }; + + return _class_weight_map_fn; } long _infer_steps(int steps_per_epoch, IDatasetV2 dataset) diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index 626d7fcad..94a2e6646 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -164,11 +164,20 @@ Dictionary test_step_multi_inputs_function(DataHandler data_handl } - Dictionary test_step(DataHandler data_handler, Tensors x, Tensors y, Tensors sample_weight = null) + Dictionary test_step(DataHandler data_handler, Tensors x, Tensors y) + { + (x,y) = data_handler.DataAdapter.Expand1d(x, y); + var y_pred = Apply(x, training: false); + var loss = compiled_loss.Call(y, y_pred); + compiled_metrics.update_state(y, y_pred); + return metrics.Select(x => (x.Name, x.result())).ToDictionary(x => x.Item1, x => (float)x.Item2); + } + + Dictionary test_step(DataHandler data_handler, Tensors x, Tensors y, Tensors sample_weight) { (x, y, sample_weight) = data_handler.DataAdapter.Expand1d(x, y, sample_weight); var y_pred = Apply(x, training: false); - var loss = compiled_loss.Call(y, y_pred, sample_weight:sample_weight); + var loss = compiled_loss.Call(y, y_pred, sample_weight: sample_weight); compiled_metrics.update_state(y, y_pred); return metrics.Select(x => (x.Name, x.result())).ToDictionary(x => x.Item1, x => (float)x.Item2); } diff --git a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs index 23c53b707..689fc9fb8 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs @@ -63,12 +63,6 @@ public ICallback fit(NDArray x, NDArray y, ((x, y, sample_weight), validation_data) = DataAdapter.train_validation_split((x, y, sample_weight), validation_split); } - // TODO(Wanglongzhi2001) - if (class_weight != null) - { - throw new NotImplementedException("class_weight is not implemented"); - } - var data_handler = new DataHandler(new DataHandlerArgs { X = x, @@ -78,6 +72,7 @@ public ICallback fit(NDArray x, NDArray y, InitialEpoch = initial_epoch, Epochs = epochs, Shuffle = shuffle, + ClassWeight = class_weight, MaxQueueSize = max_queue_size, Workers = workers, UseMultiprocessing = use_multiprocessing, @@ -126,11 +121,12 @@ public ICallback fit(IEnumerable x, NDArray y, { X = new Tensors(x.ToArray()), Y = y, + SampleWeight = sample_weight, BatchSize = batch_size, InitialEpoch = initial_epoch, Epochs = epochs, Shuffle = shuffle, - SampleWeight = sample_weight, + ClassWeight = class_weight, MaxQueueSize = max_queue_size, Workers = workers, UseMultiprocessing = use_multiprocessing, @@ -174,6 +170,7 @@ public History fit(IDatasetV2 dataset, InitialEpoch = initial_epoch, Epochs = epochs, Shuffle = shuffle, + SampleWeight = sample_weight, MaxQueueSize = max_queue_size, Workers = workers, UseMultiprocessing = use_multiprocessing, From ba8f0b084fe30868f091a168d2afa4ff274971d1 Mon Sep 17 00:00:00 2001 From: dogvane 
Date: Sun, 8 Oct 2023 21:45:26 +0800 Subject: [PATCH 136/182] =?UTF-8?q?add=20DepthwiseConv2D=20(=E6=B7=B1?= =?UTF-8?q?=E5=BA=A6=E5=8F=AF=E5=88=86=E7=A6=BB=E5=8D=B7=E7=A7=AF)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../Eager/EagerRunner.RecordGradient.cs | 5 + src/TensorFlowNET.Core/Gradients/nn_grad.cs | 31 ++++ .../Keras/Layers/ILayersApi.cs | 13 ++ src/TensorFlowNET.Core/Tensors/tensor_util.cs | 5 +- .../Layers/Convolution/DepthwiseConv2D.cs | 167 ++++++++++++++++++ src/TensorFlowNET.Keras/Layers/LayersApi.cs | 32 ++++ .../EagerModeTestBase.cs | 34 ++++ .../Layers/Layers.Convolution.Test.cs | 125 +++++++++++++ .../EagerModeTestBase.cs | 14 ++ 9 files changed, 425 insertions(+), 1 deletion(-) create mode 100644 src/TensorFlowNET.Keras/Layers/Convolution/DepthwiseConv2D.cs diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.RecordGradient.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.RecordGradient.cs index 59d5fd030..2bdd65f5b 100644 --- a/src/TensorFlowNET.Core/Eager/EagerRunner.RecordGradient.cs +++ b/src/TensorFlowNET.Core/Eager/EagerRunner.RecordGradient.cs @@ -80,6 +80,11 @@ BackwardFunction GetGradientFunction(string op_name, Tensor[] op_outputs) => (out_grads, unneeded_gradients) => { + if(!ops.gradientFunctions.ContainsKey(op_name)) + { + throw new Exception($"gradientFunctions not find op_name: {op_name}"); + } + if (ops.gradientFunctions[op_name] == null) return new Tensor[op_inputs.Length]; diff --git a/src/TensorFlowNET.Core/Gradients/nn_grad.cs b/src/TensorFlowNET.Core/Gradients/nn_grad.cs index a43a91b9a..87646a9ea 100644 --- a/src/TensorFlowNET.Core/Gradients/nn_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/nn_grad.cs @@ -229,6 +229,37 @@ public static Tensor[] _Conv2DGrad(Operation op, Tensor[] grads) }; } + /// + /// Gradient function for Conv2D. 
+ /// + /// + /// + /// + [RegisterGradient("DepthwiseConv2dNative")] + public static Tensor[] _DepthwiseConv2DGrad(Operation op, Tensor[] grads) + { + var dilations = op.get_attr_list("dilations"); + var strides = op.get_attr_list("strides"); + var padding = op.get_attr("padding"); + var explicit_paddings = op.get_attr_list("explicit_paddings"); + var data_format = op.get_attr("data_format"); + var shape = gen_array_ops.shape_n(new Tensor[] { op.inputs[0], op.inputs[1] }); + + return new Tensor[] + { + gen_nn_ops.depthwise_conv2d_native_backprop_input( + shape[0], op.inputs[1], grads[0], + strides, padding, explicit_paddings, + dilations: dilations, + data_format: data_format), + gen_nn_ops.depthwise_conv2d_native_backprop_filter(op.inputs[0], shape[1], grads[0], + strides, padding, + dilations: dilations, + explicit_paddings: explicit_paddings, + data_format: data_format) + }; + } + [RegisterGradient("FusedBatchNorm")] public static Tensor[] _FusedBatchNormGrad(Operation op, Tensor[] grads) => _BaseFusedBatchNormGrad(op, 0, grads); diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs index 5e08eadc4..a8141d354 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs @@ -95,6 +95,19 @@ public ILayer Conv2D(int filters, bool use_bias = true, string kernel_initializer = "glorot_uniform", string bias_initializer = "zeros"); + public ILayer DepthwiseConv2D(Shape kernel_size = null, + Shape strides = null, + string padding = "valid", + string data_format = null, + Shape dilation_rate = null, + int groups = 1, + int depth_multiplier = 1, + string activation = null, + bool use_bias = false, + string kernel_initializer = "glorot_uniform", + string bias_initializer = "zeros", + string depthwise_initializer = "glorot_uniform" + ); public ILayer Dense(int units); public ILayer Dense(int units, diff --git a/src/TensorFlowNET.Core/Tensors/tensor_util.cs b/src/TensorFlowNET.Core/Tensors/tensor_util.cs index e65c4850d..f688d4d5d 100644 --- a/src/TensorFlowNET.Core/Tensors/tensor_util.cs +++ b/src/TensorFlowNET.Core/Tensors/tensor_util.cs @@ -249,6 +249,9 @@ public static TensorProto make_tensor_proto(object values, TF_DataType dtype = T case sbyte val: tensor_proto.IntVal.AddRange(new[] { (int)val }); break; + case byte val: + tensor_proto.IntVal.AddRange(new[] { (int)val }); + break; case int val: tensor_proto.IntVal.AddRange(new[] { val }); break; @@ -262,7 +265,7 @@ public static TensorProto make_tensor_proto(object values, TF_DataType dtype = T tensor_proto.DoubleVal.AddRange(new[] { val }); break; default: - throw new Exception("make_tensor_proto Not Implemented"); + throw new Exception($"make_tensor_proto Not Implemented {values.GetType().Name}"); } } diff --git a/src/TensorFlowNET.Keras/Layers/Convolution/DepthwiseConv2D.cs b/src/TensorFlowNET.Keras/Layers/Convolution/DepthwiseConv2D.cs new file mode 100644 index 000000000..dae4a4036 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Convolution/DepthwiseConv2D.cs @@ -0,0 +1,167 @@ +using System; +using System.Collections.Generic; +using System.Text; +using System; +using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Keras.Saving; +using Tensorflow.Common.Types; +using Tensorflow.Keras.Utils; +using Tensorflow.Operations; +using Newtonsoft.Json; +using System.Security.Cryptography; + +namespace Tensorflow.Keras.Layers +{ + public class DepthwiseConv2DArgs: Conv2DArgs + { + /// + /// depth_multiplier: The number of 
depthwise convolution output channels for + /// each input channel.The total number of depthwise convolution output + /// channels will be equal to `filters_in* depth_multiplier`. + /// + [JsonProperty("depth_multiplier")] + public int DepthMultiplier { get; set; } = 1; + + [JsonProperty("depthwise_initializer")] + public IInitializer DepthwiseInitializer { get; set; } + } + + public class DepthwiseConv2D : Conv2D + { + /// + /// depth_multiplier: The number of depthwise convolution output channels for + /// each input channel.The total number of depthwise convolution output + /// channels will be equal to `filters_in* depth_multiplier`. + /// + int DepthMultiplier = 1; + + IInitializer DepthwiseInitializer; + + int[] strides; + + int[] dilation_rate; + + string getDataFormat() + { + return data_format == "channels_first" ? "NCHW" : "NHWC"; + } + + static int _id = 1; + + public DepthwiseConv2D(DepthwiseConv2DArgs args):base(args) + { + args.Padding = args.Padding.ToUpper(); + + if(string.IsNullOrEmpty(args.Name)) + name = "DepthwiseConv2D_" + _id; + + this.DepthMultiplier = args.DepthMultiplier; + this.DepthwiseInitializer = args.DepthwiseInitializer; + + } + + public override void build(KerasShapesWrapper input_shape) + { + //base.build(input_shape); + + var shape = input_shape.ToSingleShape(); + + int channel_axis = data_format == "channels_first" ? 1 : -1; + var input_channel = channel_axis < 0 ? + shape.dims[shape.ndim + channel_axis] : + shape.dims[channel_axis]; + + var arg = args as DepthwiseConv2DArgs; + + if (arg.Strides.ndim != shape.ndim) + { + if (arg.Strides.ndim == 2) + { + this.strides = new int[] { 1, (int)arg.Strides[0], (int)arg.Strides[1], 1 }; + } + else + { + this.strides = conv_utils.normalize_tuple(new int[] { (int)arg.Strides[0] }, shape.ndim, "strides"); + } + } + else + { + this.strides = arg.Strides.dims.Select(o=>(int)(o)).ToArray(); + } + + if (arg.DilationRate.ndim != shape.ndim) + { + this.dilation_rate = conv_utils.normalize_tuple(new int[] { (int)arg.DilationRate[0] }, shape.ndim, "dilation_rate"); + } + + long channel_data = data_format == "channels_first" ? shape[0] : shape[shape.Length - 1]; + + var depthwise_kernel_shape = this.kernel_size.dims.concat(new long[] { + channel_data, + this.DepthMultiplier + }); + + this.kernel = this.add_weight( + shape: depthwise_kernel_shape, + initializer: this.DepthwiseInitializer != null ? this.DepthwiseInitializer : this.kernel_initializer, + name: "depthwise_kernel", + trainable: true, + dtype: DType, + regularizer: this.kernel_regularizer + ); + + var axes = new Dictionary(); + axes.Add(-1, (int)input_channel); + inputSpec = new InputSpec(min_ndim: rank + 2, axes: axes); + + + if (use_bias) + { + bias = add_weight(name: "bias", + shape: ((int)channel_data), + initializer: bias_initializer, + trainable: true, + dtype: DType); + } + + built = true; + _buildInputShape = input_shape; + } + + protected override Tensors Call(Tensors inputs, Tensors state = null, + bool? training = false, IOptionalArgs? 
optional_args = null) + { + Tensor outputs = null; + + outputs = gen_nn_ops.depthwise_conv2d_native( + inputs, + filter: this.kernel.AsTensor(), + strides: this.strides, + padding: this.padding, + dilations: this.dilation_rate, + data_format: this.getDataFormat(), + name: name + ); + + if (use_bias) + { + if (data_format == "channels_first") + { + throw new NotImplementedException("call channels_first"); + } + else + { + outputs = gen_nn_ops.bias_add(outputs, ops.convert_to_tensor(bias), + data_format: this.getDataFormat(), name: name); + } + } + + if (activation != null) + outputs = activation.Apply(outputs); + + + return outputs; + } + + } +} \ No newline at end of file diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index 928e7e337..95828fbf7 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -210,6 +210,38 @@ public ILayer Conv2D(int filters, Activation = keras.activations.GetActivationFromName(activation) }); + public ILayer DepthwiseConv2D(Shape kernel_size = null, + Shape strides = null, + string padding = "valid", + string data_format = null, + Shape dilation_rate = null, + int groups = 1, + int depth_multiplier = 1, + string activation = null, + bool use_bias = false, + string kernel_initializer = "glorot_uniform", + string bias_initializer = "zeros", + string depthwise_initializer = "glorot_uniform" + ) + => new DepthwiseConv2D(new DepthwiseConv2DArgs + { + Rank = 2, + Filters = 1, + KernelSize = (kernel_size == null) ? (5, 5) : kernel_size, + Strides = strides == null ? (1) : strides, + Padding = padding, + DepthMultiplier = depth_multiplier, + DataFormat = data_format, + DilationRate = dilation_rate == null ? (1) : dilation_rate, + Groups = groups, + UseBias = use_bias, + KernelInitializer = GetInitializerByName(kernel_initializer), + DepthwiseInitializer = GetInitializerByName(depthwise_initializer == null ? kernel_initializer : depthwise_initializer), + BiasInitializer = GetInitializerByName(bias_initializer), + Activation = keras.activations.GetActivationFromName(activation), + }); + + /// /// Transposed convolution layer (sometimes called Deconvolution). 
/// diff --git a/test/TensorFlowNET.Keras.UnitTest/EagerModeTestBase.cs b/test/TensorFlowNET.Keras.UnitTest/EagerModeTestBase.cs index c7eab364c..635f13a54 100644 --- a/test/TensorFlowNET.Keras.UnitTest/EagerModeTestBase.cs +++ b/test/TensorFlowNET.Keras.UnitTest/EagerModeTestBase.cs @@ -33,6 +33,40 @@ public bool Equal(float[] f1, float[] f2) return ret; } + + public void AssertArray(int[] f1, int[] f2) + { + bool ret = false; + for (var i = 0; i < f1.Length; i++) + { + ret = f1[i] == f2[i]; + if (!ret) + break; + } + + if (!ret) + { + Assert.Fail($"Array not Equal:[{string.Join(",", f1)}] [{string.Join(",", f2)}]"); + } + } + + public void AssertArray(float[] f1, float[] f2) + { + bool ret = false; + var tolerance = .00001f; + for (var i = 0; i < f1.Length; i++) + { + ret = Math.Abs(f1[i] - f2[i]) <= tolerance; + if (!ret) + break; + } + + if (!ret) + { + Assert.Fail($"Array float not Equal:[{string.Join(",", f1)}] [{string.Join(",", f2)}]"); + } + } + public bool Equal(double[] d1, double[] d2) { bool ret = false; diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Layers.Convolution.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Layers.Convolution.Test.cs index 997dcb4f6..15c6e80fe 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Layers.Convolution.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Layers.Convolution.Test.cs @@ -1,6 +1,8 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; +using System.Linq; using Tensorflow.NumPy; using static Tensorflow.KerasApi; +using static Tensorflow.Binding; namespace Tensorflow.Keras.UnitTest.Layers { @@ -193,5 +195,128 @@ public void BasicConv2D_ksize_dilation_same() Assert.AreEqual(x.dims[2], y.shape[2]); Assert.AreEqual(filters, y.shape[3]); } + + + [TestMethod] + public void BasicDepthwiseConv2D() + { + var conv = keras.layers.DepthwiseConv2D(kernel_size:3, strides:1, activation: null, + padding:"same", depthwise_initializer: "ones"); + + var x = np.arange(2 * 9* 9* 3).reshape((2, 9, 9, 3)); + var x2 = ops.convert_to_tensor(x, TF_DataType.TF_FLOAT); + + var y = conv.Apply(x2); + + print($"input:{x2.shape} DepthwiseConv2D.out: {y.shape}"); + + + Assert.AreEqual(4, y.shape.ndim); + var arr = y.numpy().reshape((2, 9, 9, 3)); + + AssertArray(x[new int[] { 1, 1, 1 }].ToArray(), new int[] { 273, 274, 275 }); + AssertArray(arr[new int[] { 1, 1, 1 }].ToArray(), new float[] { 2457f, 2466f, 2475f }); + + var bn = keras.layers.BatchNormalization(); + var y2 = bn.Apply(y); + arr = y2.numpy().ToArray(); + + double delta = 0.0001; // 误差范围 + + Assert.AreEqual(arr[0], 59.97002f, delta); + Assert.AreEqual(arr[1], 63.96802f, delta); + } + + + [TestMethod] + public void BasicDepthwiseConv2D_strides_2() + { + var conv = keras.layers.DepthwiseConv2D(kernel_size: 3, strides: (1, 2, 2, 1), activation: null, + padding: "same", depthwise_initializer: "ones"); + + var x = np.arange(2 * 9 * 9 * 3).reshape((2, 9, 9, 3)); + var x2 = ops.convert_to_tensor(x, TF_DataType.TF_FLOAT); + + var y = conv.Apply(x2); + + print($"input:{x2.shape} DepthwiseConv2D.out: {y.shape}"); + + Assert.AreEqual(4, y.shape.ndim); + var arr = y.numpy().reshape((2, 5, 5, 3)); + + AssertArray(x[new int[] { 1, 1, 1 }].ToArray(), new int[] { 273, 274, 275 }); + AssertArray(arr[new int[] { 1, 1, 1 }].ToArray(), new float[] { 2727f, 2736f, 2745f }); + + var bn = keras.layers.BatchNormalization(); + var y2 = bn.Apply(y); + arr = y2.numpy().ToArray(); + + double delta = 0.0001; // 误差范围 + + Assert.AreEqual(arr[0], 59.97002f, delta); + Assert.AreEqual(arr[1], 63.96802f, delta); + } + + 
+ + [TestMethod] + public void BasicDepthwiseConv2D_strides_3() + { + var conv = keras.layers.DepthwiseConv2D(kernel_size: 3, strides: 3, activation: null, + padding: "same", depthwise_initializer: "ones"); + + var x = np.arange(2 * 9 * 9 * 3).reshape((2, 9, 9, 3)); + var x2 = ops.convert_to_tensor(x, TF_DataType.TF_FLOAT); + + var y = conv.Apply(x2); + + print($"input:{x2.shape} DepthwiseConv2D.out: {y.shape}"); + + Assert.AreEqual(4, y.shape.ndim); + var arr = y.numpy().reshape((2, 3, 3, 3)); + + AssertArray(x[new int[] { 1, 1, 1 }].ToArray(), new int[] { 273, 274, 275 }); + AssertArray(arr[new int[] { 1, 1, 1 }].ToArray(), new float[] { 3267f, 3276f, 3285f }); + + var bn = keras.layers.BatchNormalization(); + var y2 = bn.Apply(y); + arr = y2.numpy().ToArray(); + + double delta = 0.0001; // 误差范围 + + Assert.AreEqual(arr[0], 269.86508f, delta); + Assert.AreEqual(arr[1], 278.8606f, delta); + + } + [TestMethod] + public void BasicDepthwiseConv2D_UseBias() + { + var conv = keras.layers.DepthwiseConv2D(kernel_size: 3, strides: 1, activation: null, + use_bias: true, padding: "same", + depthwise_initializer: "ones", + bias_initializer:"ones" + ); + + var weight = conv.get_weights(); + + var x = np.arange(9 * 9 * 3).reshape((1, 9, 9, 3)); + var x2 = ops.convert_to_tensor(x, TF_DataType.TF_FLOAT); + var y = conv.Apply(x2); + + Assert.AreEqual(4, y.shape.ndim); + var arr = y.numpy().ToArray(); + + Assert.AreEqual(arr[0], 61f); + Assert.AreEqual(arr[1], 65f); + + var bn = keras.layers.BatchNormalization(); + var y2 = bn.Apply(y); + arr = y2.numpy().ToArray(); + + double delta = 0.0001; // 误差范围 + + Assert.AreEqual(arr[0], 60.96952f, delta); + Assert.AreEqual(arr[1], 64.96752f, delta); + } } } diff --git a/test/TensorFlowNET.UnitTest/EagerModeTestBase.cs b/test/TensorFlowNET.UnitTest/EagerModeTestBase.cs index d08f4e505..b7b9ae128 100644 --- a/test/TensorFlowNET.UnitTest/EagerModeTestBase.cs +++ b/test/TensorFlowNET.UnitTest/EagerModeTestBase.cs @@ -20,6 +20,20 @@ public bool Equal(float f1, float f2) return Math.Abs(f1 - f2) <= tolerance; } + public bool Equal(long[] l1, long[] l2) + { + if (l1.Length != l2.Length) + return false; + + for (var i = 0; i < l1.Length; i++) + { + if (l1[i] != l2[i]) + return false; + } + + return true; + } + public bool Equal(float[] f1, float[] f2) { bool ret = false; From 5e4f53077f94ddf8513dd925f18eeb05b81a9482 Mon Sep 17 00:00:00 2001 From: dogvane Date: Sun, 8 Oct 2023 21:52:55 +0800 Subject: [PATCH 137/182] =?UTF-8?q?=E4=BF=AE=E6=AD=A3=E5=9B=BE=E7=89=87?= =?UTF-8?q?=E5=B7=A6=E5=8F=B3=E5=92=8C=E4=B8=8A=E4=B8=8B=E7=BF=BB=E8=BD=AC?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98=EF=BC=8C=E5=B9=B6=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E5=AF=B9=E5=BA=94=E6=B5=8B=E8=AF=95=E7=94=A8=E4=BE=8B=E3=80=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- data/img001.bmp | Bin 0 -> 178662 bytes src/TensorFlowNET.Core/APIs/tf.image.cs | 7 + src/TensorFlowNET.Core/APIs/tf.io.cs | 7 + .../Keras/Layers/ILayersApi.cs | 6 + .../Operations/image_ops_impl.cs | 43 ++- src/TensorFlowNET.Keras/Layers/LayersApi.cs | 23 +- .../TensorFlowNET.Graph.UnitTest/ImageTest.cs | 90 +++++ .../ManagedAPI/ArrayOpsTest.cs | 317 ++++++++++++++++++ .../TensorFlowNET.UnitTest/NumPy/ShapeTest.cs | 44 +++ 9 files changed, 525 insertions(+), 12 deletions(-) create mode 100644 data/img001.bmp create mode 100644 test/TensorFlowNET.UnitTest/NumPy/ShapeTest.cs diff --git a/data/img001.bmp b/data/img001.bmp new file mode 100644 index 
0000000000000000000000000000000000000000..d149d76f1ac11b4f5f6700560f9bba04868f8ab4
GIT binary patch
literal 178662
[base85-encoded bitmap data for data/img001.bmp omitted]

literal 0
HcmV?d00001

diff --git a/src/TensorFlowNET.Core/APIs/tf.image.cs b/src/TensorFlowNET.Core/APIs/tf.image.cs
index ac9cbc60d..41ef52967 100644
--- a/src/TensorFlowNET.Core/APIs/tf.image.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.image.cs
@@ -339,6 +339,13 @@ public Tensor decode_image(Tensor contents,
int channels = 0, TF_DataType dtype => image_ops_impl.decode_image(contents, channels: channels, dtype: dtype, name: name, expand_animations: expand_animations); + public Tensor encode_png(Tensor contents, string name = null) + => image_ops_impl.encode_png(contents, name: name); + + public Tensor encode_jpeg(Tensor contents, string name = null) + => image_ops_impl.encode_jpeg(contents, name: name); + + /// /// Convenience function to check if the 'contents' encodes a JPEG image. /// diff --git a/src/TensorFlowNET.Core/APIs/tf.io.cs b/src/TensorFlowNET.Core/APIs/tf.io.cs index be1e86e6c..ea1e44b28 100644 --- a/src/TensorFlowNET.Core/APIs/tf.io.cs +++ b/src/TensorFlowNET.Core/APIs/tf.io.cs @@ -16,6 +16,7 @@ limitations under the License. using System.Collections.Generic; using Tensorflow.IO; +using Tensorflow.Operations; namespace Tensorflow { @@ -46,6 +47,12 @@ public Operation save_v2(Tensor prefix, string[] tensor_names, public Tensor[] restore_v2(Tensor prefix, string[] tensor_names, string[] shape_and_slices, TF_DataType[] dtypes, string name = null) => ops.restore_v2(prefix, tensor_names, shape_and_slices, dtypes, name: name); + + public Operation write_file(string filename, Tensor conentes, string name = null) + => write_file(Tensorflow.ops.convert_to_tensor(filename, TF_DataType.TF_STRING), conentes, name); + + public Operation write_file(Tensor filename, Tensor conentes, string name = null) + => gen_ops.write_file(filename, conentes, name); } public GFile gfile = new GFile(); diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs index a8141d354..3fd98e7a8 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs @@ -55,6 +55,12 @@ public ILayer Conv1D(int filters, string kernel_initializer = "glorot_uniform", string bias_initializer = "zeros"); + public ILayer Conv2D(int filters, + Shape kernel_size = null, + Shape strides = null, + string padding = "valid" + ); + public ILayer Conv2D(int filters, Shape kernel_size = null, Shape strides = null, diff --git a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs index 318b8b142..f1aff28ee 100644 --- a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs +++ b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs @@ -102,7 +102,10 @@ internal static Operation[] _CheckAtLeast3DImage(Tensor image, bool require_stat { throw new ValueError("\'image\' must be fully defined."); } - var dims = image_shape["-3:"]; + var dims = new Shape(new[] { + image_shape.dims[image_shape.dims.Length - 3], + image_shape.dims[image_shape.dims.Length - 2], + image_shape.dims[image_shape.dims.Length - 1]}); foreach (var dim in dims.dims) { if (dim == 0) @@ -112,16 +115,18 @@ internal static Operation[] _CheckAtLeast3DImage(Tensor image, bool require_stat } var image_shape_last_three_elements = new Shape(new[] { - image_shape.dims[image_shape.dims.Length - 1], + image_shape.dims[image_shape.dims.Length - 3], image_shape.dims[image_shape.dims.Length - 2], - image_shape.dims[image_shape.dims.Length - 3]}); + image_shape.dims[image_shape.dims.Length - 1]}); if (!image_shape_last_three_elements.IsFullyDefined) { Tensor image_shape_ = array_ops.shape(image); - var image_shape_return = tf.constant(new[] { - image_shape_.dims[image_shape.dims.Length - 1], - image_shape_.dims[image_shape.dims.Length - 2], - image_shape_.dims[image_shape.dims.Length - 3]}); + var image_shape_return = 
tf.slice(image_shape_, new[] { Math.Max(image_shape.dims.Length - 3, 0) }, new[] { 3 }); + + //var image_shape_return = tf.constant(new[] { + // image_shape_.dims[image_shape_.dims.Length - 3], + // image_shape_.dims[image_shape_.dims.Length - 2], + // image_shape_.dims[image_shape_.dims.Length - 1]}); return new Operation[] { check_ops.assert_positive( @@ -209,10 +214,10 @@ internal static Tensor _random_flip(Tensor image, int flip_index, int seed, stri } public static Tensor flip_left_right(Tensor image) - => _flip(image, 0, "flip_left_right"); + => _flip(image, 1, "flip_left_right"); public static Tensor flip_up_down(Tensor image) - => _flip(image, 1, "flip_up_down"); + => _flip(image, 0, "flip_up_down"); internal static Tensor _flip(Tensor image, int flip_index, string scope_name) { @@ -223,11 +228,11 @@ internal static Tensor _flip(Tensor image, int flip_index, string scope_name) Shape shape = image.shape; if (shape.ndim == 3 || shape.ndim == Unknown) { - return fix_image_flip_shape(image, gen_array_ops.reverse(image, ops.convert_to_tensor(new int[] { flip_index }))); + return fix_image_flip_shape(image, gen_array_ops.reverse_v2(image, ops.convert_to_tensor(new int[] { flip_index }))); } else if (shape.ndim == 4) { - return gen_array_ops.reverse_v2(image, ops.convert_to_tensor(new[] { (flip_index + 1) % 2 })); + return gen_array_ops.reverse_v2(image, ops.convert_to_tensor(new[] { flip_index + 1 })); } else { @@ -2047,6 +2052,22 @@ internal static (Tensor, Tensor) non_max_suppression_padded_v1(Tensor boxes, Ten }); } + public static Tensor encode_jpeg(Tensor contents, string name = null) + { + return tf_with(ops.name_scope(name, "encode_jpeg"), scope => + { + return gen_ops.encode_jpeg(contents, name:name); + }); + } + + public static Tensor encode_png(Tensor contents, string name = null) + { + return tf_with(ops.name_scope(name, "encode_png"), scope => + { + return gen_ops.encode_png(contents, name: name); + }); + } + public static Tensor is_jpeg(Tensor contents, string name = null) { return tf_with(ops.name_scope(name, "is_jpeg"), scope => diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index 95828fbf7..bcc19dc22 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -112,7 +112,28 @@ public ILayer Conv1D(int filters, KernelInitializer = GetInitializerByName(kernel_initializer), BiasInitializer = GetInitializerByName(bias_initializer) }); - + public ILayer Conv2D(int filters, + Shape kernel_size = null, + Shape strides = null, + string padding = "valid") + => new Conv2D(new Conv2DArgs + { + Rank = 2, + Filters = filters, + KernelSize = (kernel_size == null) ? (5, 5) : kernel_size, + Strides = strides == null ? (1, 1) : strides, + Padding = padding, + DataFormat = null, + DilationRate = (1, 1), + Groups = 1, + UseBias = false, + KernelRegularizer = null, + KernelInitializer =tf.glorot_uniform_initializer, + BiasInitializer = tf.zeros_initializer, + BiasRegularizer = null, + ActivityRegularizer = null, + Activation = keras.activations.Linear, + }); /// /// 2D convolution layer (e.g. spatial convolution over images). /// This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs. 
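A quick illustration of the flip fix in this patch (an editorial sketch with assumed example shapes, not part of the committed diff): for a 3-D HWC image, flip_left_right now reverses axis 1 (width) and flip_up_down reverses axis 0 (height); for a batched 4-D NHWC tensor the same flips reverse axes 2 and 1. Using only APIs that appear elsewhere in this series:

    // Sketch only: the flip/reverse equivalence that the ArrayOpsTest cases below assert.
    var img = ops.convert_to_tensor(np.arange(2 * 3 * 3).reshape((2, 3, 3)).astype(TF_DataType.TF_FLOAT)); // H=2, W=3, C=3
    var lr = tf.image.flip_left_right(img); // same values as tf.reverse(img, new Axis(1))
    var ud = tf.image.flip_up_down(img);    // same values as tf.reverse(img, new Axis(0))

The ImageTest.cs and ArrayOpsTest.cs hunks that follow exercise exactly this behaviour against hand-built expected arrays.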
diff --git a/test/TensorFlowNET.Graph.UnitTest/ImageTest.cs b/test/TensorFlowNET.Graph.UnitTest/ImageTest.cs index d671b6096..127b65bf6 100644 --- a/test/TensorFlowNET.Graph.UnitTest/ImageTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/ImageTest.cs @@ -4,6 +4,7 @@ using Tensorflow; using static Tensorflow.Binding; using System; +using System.IO; namespace TensorFlowNET.UnitTest { @@ -164,5 +165,94 @@ public void TestCropAndResize() Assert.AreEqual(result.size, 16ul); Assert.AreEqual(result[0, 0, 0, 0], 12f); } + + [TestMethod] + public void ImageSaveTest() + { + var imgPath = TestHelper.GetFullPathFromDataDir("img001.bmp"); + var jpegImgPath = TestHelper.GetFullPathFromDataDir("img001.jpeg"); + var pngImgPath = TestHelper.GetFullPathFromDataDir("img001.png"); + + File.Delete(jpegImgPath); + File.Delete(pngImgPath); + + var contents = tf.io.read_file(imgPath); + var bmp = tf.image.decode_image(contents); + Assert.AreEqual(bmp.name, "decode_image/DecodeImage:0"); + + var jpeg = tf.image.encode_jpeg(bmp); + var op1 = tf.io.write_file(jpegImgPath, jpeg); + + var png = tf.image.encode_png(bmp); + var op2 = tf.io.write_file(pngImgPath, png); + + this.session().run(op1); + this.session().run(op2); + + Assert.IsTrue(File.Exists(jpegImgPath), "not find file:" + jpegImgPath); + Assert.IsTrue(File.Exists(pngImgPath), "not find file:" + pngImgPath); + + // 如果要测试图片正确性,需要注释下面两行代码 + File.Delete(jpegImgPath); + File.Delete(pngImgPath); + } + + [TestMethod] + public void ImageFlipTest() + { + var imgPath = TestHelper.GetFullPathFromDataDir("img001.bmp"); + + var contents = tf.io.read_file(imgPath); + var bmp = tf.image.decode_image(contents); + + // 左右翻转 + var lrImgPath = TestHelper.GetFullPathFromDataDir("img001_lr.png"); + File.Delete(lrImgPath); + + var lr = tf.image.flip_left_right(bmp); + var png = tf.image.encode_png(lr); + var op = tf.io.write_file(lrImgPath, png); + this.session().run(op); + + Assert.IsTrue(File.Exists(lrImgPath), "not find file:" + lrImgPath); + + // 上下翻转 + var updownImgPath = TestHelper.GetFullPathFromDataDir("img001_updown.png"); + File.Delete(updownImgPath); + + var updown = tf.image.flip_up_down(bmp); + var pngupdown = tf.image.encode_png(updown); + var op2 = tf.io.write_file(updownImgPath, pngupdown); + this.session().run(op2); + Assert.IsTrue(File.Exists(updownImgPath)); + + + // 暂时先人工观测图片是否翻转,观测时需要删除下面这两行代码 + File.Delete(lrImgPath); + File.Delete(updownImgPath); + + // 多图翻转 + // 目前直接通过 bmp 拿到 shape ,这里先用默认定义图片大小来构建了 + var mImg = tf.stack(new[] { bmp, lr }, axis:0); + print(mImg.shape); + + var up2 = tf.image.flip_up_down(mImg); + + var updownImgPath_m1 = TestHelper.GetFullPathFromDataDir("img001_m_ud.png"); // 直接上下翻转 + File.Delete(updownImgPath_m1); + + var img001_updown_m2 = TestHelper.GetFullPathFromDataDir("img001_m_lr_ud.png"); // 先左右再上下 + File.Delete(img001_updown_m2); + + var png2 = tf.image.encode_png(up2[0]); + tf.io.write_file(updownImgPath_m1, png2); + + png2 = tf.image.encode_png(up2[1]); + tf.io.write_file(img001_updown_m2, png2); + + // 如果要测试图片正确性,需要注释下面两行代码 + File.Delete(updownImgPath_m1); + File.Delete(img001_updown_m2); + } } } diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs index 675689bb1..e25c9779d 100644 --- a/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs +++ b/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs @@ -3,6 +3,7 @@ using Tensorflow; using static Tensorflow.Binding; using System.Linq; +using Tensorflow.Operations; namespace 
TensorFlowNET.UnitTest.ManagedAPI { @@ -105,5 +106,321 @@ public void ReverseArray() Assert.IsTrue(Equal(a[0].ToArray().Reverse().ToArray(), b[0].ToArray())); Assert.IsTrue(Equal(a[1].ToArray().Reverse().ToArray(), b[1].ToArray())); } + + [TestMethod] + public void ReverseImgArray3D() + { + // 创建 sourceImg 数组 + var sourceImgArray = new float[,,] { + { + { 237, 28, 36 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + }; + var sourceImg = ops.convert_to_tensor(sourceImgArray); + + // 创建 lrImg 数组 + var lrImgArray = new float[,,] { + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 237, 28, 36 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + }; + var lrImg = ops.convert_to_tensor(lrImgArray); + + var lr = tf.image.flip_left_right(sourceImg); + Assert.IsTrue(Equal(lrImg.numpy().ToArray(), lr.numpy().ToArray()), "tf.image.flip_left_right fail."); + + var lr2 = tf.reverse(sourceImg, 1); + Assert.IsTrue(Equal(lrImg.numpy().ToArray(), lr2.numpy().ToArray()), "tf.reverse (axis=1) fail."); + + var lr3 = gen_array_ops.reverse_v2(sourceImg, ops.convert_to_tensor(new[] { 1 })); + Assert.IsTrue(Equal(lrImg.numpy().ToArray(), lr3.numpy().ToArray()), "gen_array_ops.reverse_v2 axis=1 fail."); + + // 创建 udImg 数组 + var udImgArray = new float[,,] { + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 237, 28, 36 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + }; + var udImg = ops.convert_to_tensor(udImgArray); + + var ud = tf.image.flip_up_down(sourceImg); + Assert.IsTrue(Equal(udImg.numpy().ToArray(), ud.numpy().ToArray()), "tf.image.flip_up_down fail."); + + var ud2 = tf.reverse(sourceImg, new Axis(0)); + Assert.IsTrue(Equal(udImg.numpy().ToArray(), ud2.numpy().ToArray()), "tf.reverse (axis=0) fail."); + + var ud3 = gen_array_ops.reverse_v2(sourceImg, ops.convert_to_tensor(new[] { 0 })); + Assert.IsTrue(Equal(udImg.numpy().ToArray(), ud3.numpy().ToArray()), "gen_array_ops.reverse_v2 axis=0 fail."); + } + + [TestMethod] + public void ReverseImgArray4D() + { + // 原图左上角,加一张左右翻转后的图片 + var m = new float[,,,] { + { + { + { 237, 28, 36 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + }, + { + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 237, 28, 36 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + } + }; + var sourceImg = ops.convert_to_tensor(m); + + var lrArray = new float[,,,] { + { + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 237, 28, 36 }, + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + }, + { + { + { 237, 28, 36 }, + { 255, 255, 255 }, + { 255, 255, 255 }, + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + } + }; + var lrImg = ops.convert_to_tensor(lrArray); + + // 创建 ud 数组 + var udArray = new float[,,,] { + { + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 237, 28, 36 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + }, + { + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 237, 28, 36 } + } + } + }; + var udImg = ops.convert_to_tensor(udArray); + + var ud3 = gen_array_ops.reverse_v2(sourceImg, ops.convert_to_tensor(new[] { 1 })); + Assert.IsTrue(Equal(udImg.numpy().ToArray(), ud3.numpy().ToArray()), "gen_array_ops.reverse_v2 axis=1 fail."); + + var ud2 = tf.reverse(sourceImg, new Axis(1)); + 
Assert.IsTrue(Equal(udImg.numpy().ToArray(), ud2.numpy().ToArray()), "tf.reverse (axis=1) fail."); + + var ud = tf.image.flip_up_down(sourceImg); + Assert.IsTrue(Equal(udImg.numpy().ToArray(), ud.numpy().ToArray()), "tf.image.flip_up_down fail."); + + // 左右翻转 + var lr = tf.image.flip_left_right(sourceImg); + Assert.IsTrue(Equal(lrImg.numpy().ToArray(), lr.numpy().ToArray()), "tf.image.flip_left_right fail."); + + var lr2 = tf.reverse(sourceImg, 0); + Assert.IsTrue(Equal(lrImg.numpy().ToArray(), lr2.numpy().ToArray()), "tf.reverse (axis=1) fail."); + + var lr3 = gen_array_ops.reverse_v2(sourceImg, ops.convert_to_tensor(new[] { 0 })); + Assert.IsTrue(Equal(lrImg.numpy().ToArray(), lr3.numpy().ToArray()), "gen_array_ops.reverse_v2 axis=1 fail."); + + } + + [TestMethod] + public void ReverseImgArray4D_3x3() + { + // 原图左上角,加一张左右翻转后的图片 + var m = new float[,,,] { + { + { + { 237, 28, 36 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + }, + { + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 237, 28, 36 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + } + }; + var sourceImg = ops.convert_to_tensor(m); + + var lrArray = new float[,,,] { + { + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 237, 28, 36 }, + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + }, + { + { + { 237, 28, 36 }, + { 255, 255, 255 }, + { 255, 255, 255 }, + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + } + }; + var lrImg = ops.convert_to_tensor(lrArray); + + // 创建 ud 数组 + var udArray = new float[,,,] { + { + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 237, 28, 36 }, + { 255, 255, 255 }, + { 255, 255, 255 } + } + }, + { { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 255, 255, 255 } + }, + { + { 255, 255, 255 }, + { 255, 255, 255 }, + { 237, 28, 36 } + } + } + }; + var udImg = ops.convert_to_tensor(udArray); + + var ud3 = gen_array_ops.reverse_v2(sourceImg, ops.convert_to_tensor(new[] { 1 })); + Assert.IsTrue(Equal(udImg.numpy().ToArray(), ud3.numpy().ToArray()), "gen_array_ops.reverse_v2 axis=1 fail."); + + var ud2 = tf.reverse(sourceImg, new Axis(1)); + Assert.IsTrue(Equal(udImg.numpy().ToArray(), ud2.numpy().ToArray()), "tf.reverse (axis=1) fail."); + + var ud = tf.image.flip_up_down(sourceImg); + Assert.IsTrue(Equal(udImg.numpy().ToArray(), ud.numpy().ToArray()), "tf.image.flip_up_down fail."); + + // 左右翻转 + var lr = tf.image.flip_left_right(sourceImg); + Assert.IsTrue(Equal(lrImg.numpy().ToArray(), lr.numpy().ToArray()), "tf.image.flip_left_right fail."); + + var lr2 = tf.reverse(sourceImg, 0); + Assert.IsTrue(Equal(lrImg.numpy().ToArray(), lr2.numpy().ToArray()), "tf.reverse (axis=1) fail."); + + var lr3 = gen_array_ops.reverse_v2(sourceImg, ops.convert_to_tensor(new[] { 0 })); + Assert.IsTrue(Equal(lrImg.numpy().ToArray(), lr3.numpy().ToArray()), "gen_array_ops.reverse_v2 axis=1 fail."); + + } } } diff --git a/test/TensorFlowNET.UnitTest/NumPy/ShapeTest.cs b/test/TensorFlowNET.UnitTest/NumPy/ShapeTest.cs new file mode 
100644
index 000000000..f5a8685be
--- /dev/null
+++ b/test/TensorFlowNET.UnitTest/NumPy/ShapeTest.cs
@@ -0,0 +1,44 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using Tensorflow.NumPy;
+using System;
+using System.Linq;
+using static Tensorflow.Binding;
+using Tensorflow;
+
+namespace TensorFlowNET.UnitTest.NumPy
+{
+    [TestClass]
+    public class ShapeTest : EagerModeTestBase
+    {
+        [Ignore]
+        [TestMethod]
+        public unsafe void ShapeGetLastElements()
+        {
+            // test code from function _CheckAtLeast3DImage
+            // the previous _CheckAtLeast3DImage had a bug; it now passes, and the code below is correct
+            // todo: the shape["-3:"] indexing still has a bug and needs fixing; re-enable this unit test once it is fixed, ignore it for now
+
+            var image_shape = new Shape(new[] { 32, 64, 3 });
+            var image_shape_4d = new Shape(new[] { 4, 64, 32, 3 });
+
+            var image_shape_last_three_elements = new Shape(new[] {
+                image_shape.dims[image_shape.dims.Length - 3],
+                image_shape.dims[image_shape.dims.Length - 2],
+                image_shape.dims[image_shape.dims.Length - 1]});
+
+            var image_shape_last_three_elements2 = image_shape["-3:"];
+
+            Assert.IsTrue(Equal(image_shape_last_three_elements.dims, image_shape_last_three_elements2.dims), "3dims get fail.");
+
+            var image_shape_last_three_elements_4d = new Shape(new[] {
+                image_shape_4d.dims[image_shape_4d.dims.Length - 3],
+                image_shape_4d.dims[image_shape_4d.dims.Length - 2],
+                image_shape_4d.dims[image_shape_4d.dims.Length - 1]});
+
+            var image_shape_last_three_elements2_4d = image_shape_4d["-3:"];
+
+            Assert.IsTrue(Equals(image_shape_last_three_elements_4d.dims, image_shape_last_three_elements2_4d.dims), "4dims get fail.");
+        }
+
+    }
+}
\ No newline at end of file

From baf620a3e875e7cf6cfa82eb3c56392e2b7fab9a Mon Sep 17 00:00:00 2001
From: dogvane
Date: Sun, 8 Oct 2023 22:06:15 +0800
Subject: [PATCH 138/182] Fix the bug where training on GPU in keras mode runs
 out of GPU memory.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The observed symptom: after increasing the batch size, some models run out of GPU memory partway
through the first epoch, yet once they get past the first epoch they can train to completion. Under
Python the same models can use a much larger batch size.
Analysis with a minimal training script showed the cause: the image data loaded into GPU memory is
not released after each step.
No API for explicitly releasing that memory was found, so GC.Collect() is called directly, which
lets the GPU memory be reclaimed.
The current fix is therefore to run GC.Collect() once per step to release GPU memory.
---
 src/TensorFlowNET.Core/Keras/Engine/IModel.cs | 23 +++++++++++++++++++
 .../Engine/Model.Evaluate.cs                  |  3 +++
 src/TensorFlowNET.Keras/Engine/Model.Fit.cs   | 12 +++++-----
 .../Engine/Model.Predict.cs                   |  2 +-
 4 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/src/TensorFlowNET.Core/Keras/Engine/IModel.cs b/src/TensorFlowNET.Core/Keras/Engine/IModel.cs
index 1840f88b9..889c76d91 100644
--- a/src/TensorFlowNET.Core/Keras/Engine/IModel.cs
+++ b/src/TensorFlowNET.Core/Keras/Engine/IModel.cs
@@ -24,6 +24,7 @@ ICallback fit(NDArray x, NDArray y,
         List callbacks = null,
         float validation_split = 0f,
         ValidationDataPack validation_data = null,
+        int validation_step = 10,
         bool shuffle = true,
         Dictionary class_weight = null,
         NDArray sample_weight = null,
@@ -47,6 +48,20 @@ ICallback fit(IEnumerable x, NDArray y,
         int workers = 1,
         bool use_multiprocessing = false);

+    public ICallback fit(IDatasetV2 dataset,
+        int batch_size = -1,
+        int epochs = 1,
+        int verbose = 1,
+        List callbacks = null,
+        IDatasetV2 validation_data = null,
+        int validation_step = 10, // how often validation is run
+        bool shuffle = true,
+        Dictionary class_weight = null,
+        int initial_epoch = 0,
+        int max_queue_size = 10,
+        int workers = 1,
+        bool use_multiprocessing = false);
+
    void save(string
filepath, bool overwrite = true, bool include_optimizer = true, @@ -85,6 +100,14 @@ Tensors predict(Tensors x, int workers = 1, bool use_multiprocessing = false); + public Tensors predict(IDatasetV2 dataset, + int batch_size = -1, + int verbose = 0, + int steps = -1, + int max_queue_size = 10, + int workers = 1, + bool use_multiprocessing = false); + void summary(int line_length = -1, float[] positions = null); IKerasConfig get_config(); diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index 94a2e6646..474d5e5a5 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -132,6 +132,7 @@ Dictionary evaluate(DataHandler data_handler, CallbackList callba var end_step = step + data_handler.StepIncrement; if (!is_val) callbacks.on_test_batch_end(end_step, logs); + GC.Collect(); } } callbacks.on_test_end(logs); @@ -167,7 +168,9 @@ Dictionary test_step_multi_inputs_function(DataHandler data_handl Dictionary test_step(DataHandler data_handler, Tensors x, Tensors y) { (x,y) = data_handler.DataAdapter.Expand1d(x, y); + var y_pred = Apply(x, training: false); + var loss = compiled_loss.Call(y, y_pred); compiled_metrics.update_state(y, y_pred); return metrics.Select(x => (x.Name, x.result())).ToDictionary(x => x.Item1, x => (float)x.Item2); diff --git a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs index 689fc9fb8..d61211c71 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs @@ -41,6 +41,7 @@ public ICallback fit(NDArray x, NDArray y, List callbacks = null, float validation_split = 0f, ValidationDataPack validation_data = null, + int validation_step = 10, bool shuffle = true, Dictionary class_weight = null, NDArray sample_weight = null, @@ -147,7 +148,7 @@ public ICallback fit(IEnumerable x, NDArray y, } } - public History fit(IDatasetV2 dataset, + public ICallback fit(IDatasetV2 dataset, int batch_size = -1, int epochs = 1, int verbose = 1, @@ -156,7 +157,6 @@ public History fit(IDatasetV2 dataset, int validation_step = 10, bool shuffle = true, Dictionary class_weight = null, - NDArray sample_weight = null, int initial_epoch = 0, int max_queue_size = 10, int workers = 1, @@ -170,7 +170,7 @@ public History fit(IDatasetV2 dataset, InitialEpoch = initial_epoch, Epochs = epochs, Shuffle = shuffle, - SampleWeight = sample_weight, + ClassWeight = class_weight, MaxQueueSize = max_queue_size, Workers = workers, UseMultiprocessing = use_multiprocessing, @@ -218,6 +218,7 @@ History FitInternal(DataHandler data_handler, int epochs, int validation_step, i var end_step = step + data_handler.StepIncrement; End_step = end_step; callbacks.on_train_batch_end(end_step, logs); + GC.Collect(); } if (validation_data != null) @@ -233,11 +234,10 @@ History FitInternal(DataHandler data_handler, int epochs, int validation_step, i callbacks.on_train_batch_end(End_step, logs); } + GC.Collect(); callbacks.on_epoch_end(epoch, logs); - GC.Collect(); - GC.WaitForPendingFinalizers(); if (stop_training) { break; @@ -282,6 +282,7 @@ History FitInternal(DataHandler data_handler, int epochs, int verbose, List { { "outputs", batch_outputs } }); + GC.Collect(); } } From 93a242c08a330399328c8a1190f6b0d46308a226 Mon Sep 17 00:00:00 2001 From: Jucko13 Date: Tue, 10 Oct 2023 16:53:04 +0200 Subject: [PATCH 139/182] Implemented support for loading Concatenate layers model.load_model now supports loading of concatenate 
layers. python tensorflow exports concatenate layers in an extra nested array in the manifest so added a check for that in generic_utils.cs. Concatenate was missing the build=true, this fix prevents the layer being build multiple times. Concatenate has 2 or more input nodes so List was required instead of just NodeConfig in Functional.FromConfig.cs. Added missing axis JsonProperty attribute for MergeArgs (used by Concatenate) --- .../Keras/ArgsDefinition/Merging/MergeArgs.cs | 6 ++-- .../Engine/Functional.FromConfig.cs | 30 +++++++++++-------- .../Layers/Merging/Concatenate.cs | 1 + .../Utils/generic_utils.cs | 13 +++++++- 4 files changed, 35 insertions(+), 15 deletions(-) diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/MergeArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/MergeArgs.cs index 0140b3dd0..9bcf1908e 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/MergeArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/MergeArgs.cs @@ -1,13 +1,15 @@ -using System; +using Newtonsoft.Json; +using System; using System.Collections.Generic; using System.Text; namespace Tensorflow.Keras.ArgsDefinition { // TODO: complete the implementation - public class MergeArgs : LayerArgs + public class MergeArgs : AutoSerializeLayerArgs { public Tensors Inputs { get; set; } + [JsonProperty("axis")] public int Axis { get; set; } } } diff --git a/src/TensorFlowNET.Keras/Engine/Functional.FromConfig.cs b/src/TensorFlowNET.Keras/Engine/Functional.FromConfig.cs index 7b826af8e..375fc9106 100644 --- a/src/TensorFlowNET.Keras/Engine/Functional.FromConfig.cs +++ b/src/TensorFlowNET.Keras/Engine/Functional.FromConfig.cs @@ -30,7 +30,7 @@ public static (Tensors, Tensors, Dictionary) reconstruct_from_co created_layers = created_layers ?? new Dictionary(); var node_index_map = new Dictionary<(string, int), int>(); var node_count_by_layer = new Dictionary(); - var unprocessed_nodes = new Dictionary(); + var unprocessed_nodes = new Dictionary>(); // First, we create all layers and enqueue nodes to be processed foreach (var layer_data in config.Layers) process_layer(created_layers, layer_data, unprocessed_nodes, node_count_by_layer); @@ -79,7 +79,7 @@ public static (Tensors, Tensors, Dictionary) reconstruct_from_co static void process_layer(Dictionary created_layers, LayerConfig layer_data, - Dictionary unprocessed_nodes, + Dictionary> unprocessed_nodes, Dictionary node_count_by_layer) { ILayer layer = null; @@ -92,32 +92,38 @@ static void process_layer(Dictionary created_layers, created_layers[layer_name] = layer; } - node_count_by_layer[layer] = _should_skip_first_node(layer) ? 1 : 0; + node_count_by_layer[layer] = layer_data.InboundNodes.Count - (_should_skip_first_node(layer) ? 
1 : 0); var inbound_nodes_data = layer_data.InboundNodes; foreach (var node_data in inbound_nodes_data) { if (!unprocessed_nodes.ContainsKey(layer)) - unprocessed_nodes[layer] = node_data; + unprocessed_nodes[layer] = new List() { node_data }; else - unprocessed_nodes.Add(layer, node_data); + unprocessed_nodes[layer].Add(node_data); } } static void process_node(ILayer layer, - NodeConfig node_data, + List nodes_data, Dictionary created_layers, Dictionary node_count_by_layer, Dictionary<(string, int), int> node_index_map) { + var input_tensors = new List(); - var inbound_layer_name = node_data.Name; - var inbound_node_index = node_data.NodeIndex; - var inbound_tensor_index = node_data.TensorIndex; - var inbound_layer = created_layers[inbound_layer_name]; - var inbound_node = inbound_layer.InboundNodes[inbound_node_index]; - input_tensors.Add(inbound_node.Outputs[inbound_node_index]); + for (int i = 0; i < nodes_data.Count; i++) + { + var node_data = nodes_data[i]; + var inbound_layer_name = node_data.Name; + var inbound_node_index = node_data.NodeIndex; + var inbound_tensor_index = node_data.TensorIndex; + + var inbound_layer = created_layers[inbound_layer_name]; + var inbound_node = inbound_layer.InboundNodes[inbound_node_index]; + input_tensors.Add(inbound_node.Outputs[inbound_node_index]); + } var output_tensors = layer.Apply(input_tensors); diff --git a/src/TensorFlowNET.Keras/Layers/Merging/Concatenate.cs b/src/TensorFlowNET.Keras/Layers/Merging/Concatenate.cs index a2a8286ba..fa82426ce 100644 --- a/src/TensorFlowNET.Keras/Layers/Merging/Concatenate.cs +++ b/src/TensorFlowNET.Keras/Layers/Merging/Concatenate.cs @@ -39,6 +39,7 @@ public override void build(KerasShapesWrapper input_shape) shape_set.Add(shape); }*/ _buildInputShape = input_shape; + built = true; } protected override Tensors _merge_function(Tensors inputs) diff --git a/src/TensorFlowNET.Keras/Utils/generic_utils.cs b/src/TensorFlowNET.Keras/Utils/generic_utils.cs index 5402f4995..20937e2e5 100644 --- a/src/TensorFlowNET.Keras/Utils/generic_utils.cs +++ b/src/TensorFlowNET.Keras/Utils/generic_utils.cs @@ -112,12 +112,23 @@ public static FunctionalConfig deserialize_model_config(JToken json) foreach (var token in layersToken) { var args = deserialize_layer_args(token["class_name"].ToObject(), token["config"]); + + List nodeConfig = null; //python tensorflow sometimes exports inbound nodes in an extra nested array + if (token["inbound_nodes"].Count() > 0 && token["inbound_nodes"][0].Count() > 0 && token["inbound_nodes"][0][0].Count() > 0) + { + nodeConfig = token["inbound_nodes"].ToObject>>().FirstOrDefault() ?? new List(); + } + else + { + nodeConfig = token["inbound_nodes"].ToObject>(); + } + config.Layers.Add(new LayerConfig() { Config = args, Name = token["name"].ToObject(), ClassName = token["class_name"].ToObject(), - InboundNodes = token["inbound_nodes"].ToObject>() + InboundNodes = nodeConfig, }); } config.InputLayers = json["input_layers"].ToObject>(); From 9f0ffa4bc83b181ddd525cf1b90d77a32e073fa3 Mon Sep 17 00:00:00 2001 From: Jucko13 Date: Tue, 10 Oct 2023 17:02:22 +0200 Subject: [PATCH 140/182] Implemented unittests for Concatenate layers and calls The loading and saving of a simple model with a Concatenate layer is tested to check if the model is the same after reloading. Implemented missing axis parameter for np.stack (added some handy tuple calls too like the np.concatenate example). 
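For readers following along, a minimal sketch of the np.stack axis overloads this commit message describes (example arrays and shapes are assumed, not part of the committed diff; the tuple overloads are the ones added in Numpy.Manipulation.cs below):

    // Sketch only: stacking two (2, 5) arrays with the new axis parameter.
    var a = np.arange(10).reshape((2, 5));
    var b = np.arange(10, 20).reshape((2, 5));
    var s0 = np.stack(a, b);             // existing params overload, axis 0 -> shape (2, 2, 5)
    var s1 = np.stack((a, b), axis: 1);  // new tuple overload -> shape (2, 2, 5), stacked along axis 1
    var s2 = np.stack((a, b), axis: 2);  // -> shape (2, 5, 2)

The Concatenate round-trip test added in ModelLoadTest.cs then checks that a model containing a Concatenate(axis: 3) layer predicts the same output shape after model.save and load_model.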
--- .../NumPy/Numpy.Manipulation.cs | 9 ++++ .../Layers/Layers.Merging.Test.cs | 15 ++++--- .../Model/ModelLoadTest.cs | 43 +++++++++++++++++++ 3 files changed, 62 insertions(+), 5 deletions(-) diff --git a/src/TensorFlowNET.Core/NumPy/Numpy.Manipulation.cs b/src/TensorFlowNET.Core/NumPy/Numpy.Manipulation.cs index 940856056..5e2574170 100644 --- a/src/TensorFlowNET.Core/NumPy/Numpy.Manipulation.cs +++ b/src/TensorFlowNET.Core/NumPy/Numpy.Manipulation.cs @@ -30,6 +30,15 @@ public static NDArray concatenate((NDArray, NDArray) tuple, int axis = 0) [AutoNumPy] public static NDArray stack(params NDArray[] arrays) => new NDArray(array_ops.stack(arrays)); + [AutoNumPy] + public static NDArray stack(NDArray[] arrays, int axis = 0) => new NDArray(array_ops.stack(arrays, axis)); + + [AutoNumPy] + public static NDArray stack((NDArray, NDArray) tuple, int axis = 0) => new NDArray(array_ops.stack(new[] { tuple.Item1, tuple.Item2 }, axis)); + + [AutoNumPy] + public static NDArray stack((NDArray, NDArray, NDArray) tuple, int axis = 0) => new NDArray(array_ops.stack(new[] { tuple.Item1, tuple.Item2, tuple.Item3 }, axis)); + [AutoNumPy] public static NDArray moveaxis(NDArray array, Axis source, Axis destination) => new NDArray(array_ops.moveaxis(array, source, destination)); } diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Layers.Merging.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Layers.Merging.Test.cs index 36e44e482..9bc2fa767 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/Layers.Merging.Test.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Layers.Merging.Test.cs @@ -1,4 +1,5 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; +using System.Collections.Generic; using Tensorflow.NumPy; using static Tensorflow.KerasApi; @@ -8,12 +9,16 @@ namespace Tensorflow.Keras.UnitTest.Layers public class LayersMergingTest : EagerModeTestBase { [TestMethod] - public void Concatenate() + [DataRow(1, 4, 1, 5)] + [DataRow(2, 2, 2, 5)] + [DataRow(3, 2, 1, 10)] + public void Concatenate(int axis, int shapeA, int shapeB, int shapeC) { - var x = np.arange(20).reshape((2, 2, 5)); - var y = np.arange(20, 30).reshape((2, 1, 5)); - var z = keras.layers.Concatenate(axis: 1).Apply(new Tensors(x, y)); - Assert.AreEqual((2, 3, 5), z.shape); + var x = np.arange(10).reshape((1, 2, 1, 5)); + var y = np.arange(10, 20).reshape((1, 2, 1, 5)); + var z = keras.layers.Concatenate(axis: axis).Apply(new Tensors(x, y)); + Assert.AreEqual((1, shapeA, shapeB, shapeC), z.shape); } + } } diff --git a/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs b/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs index cb570fc0c..53a67cbfa 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs @@ -1,10 +1,13 @@ using Microsoft.VisualStudio.TestPlatform.Utilities; using Microsoft.VisualStudio.TestTools.UnitTesting; +using Newtonsoft.Json.Linq; using System.Linq; +using System.Xml.Linq; using Tensorflow.Keras.Engine; using Tensorflow.Keras.Optimizers; using Tensorflow.Keras.UnitTest.Helpers; using Tensorflow.NumPy; +using static HDF.PInvoke.H5Z; using static Tensorflow.Binding; using static Tensorflow.KerasApi; @@ -124,4 +127,44 @@ public void TestModelBeforeTF2_5() var model = tf.saved_model.load(@"D:\development\temp\saved_model") as Tensorflow.Keras.Engine.Model; model.summary(); } + + + + [TestMethod] + public void CreateConcatenateModelSaveAndLoad() + { + // a small demo model that is just here to see if the axis value for the concatenate 
method is saved and loaded. + var input_layer = tf.keras.layers.Input((8, 8, 5)); + + var conv1 = tf.keras.layers.Conv2D(2, kernel_size: 3, activation: "relu", padding: "same"/*, data_format: "_conv_1"*/).Apply(input_layer); + conv1.Name = "conv1"; + + var conv2 = tf.keras.layers.Conv2D(2, kernel_size: 3, activation: "relu", padding: "same"/*, data_format: "_conv_2"*/).Apply(input_layer); + conv2.Name = "conv2"; + + var concat1 = tf.keras.layers.Concatenate(axis: 3).Apply((conv1, conv2)); + concat1.Name = "concat1"; + + var model = tf.keras.Model(input_layer, concat1); + model.compile(tf.keras.optimizers.Adam(), tf.keras.losses.CategoricalCrossentropy()); + + model.save(@"Assets/concat_axis3_model"); + + + var tensorInput = np.arange(320).reshape((1, 8, 8, 5)).astype(TF_DataType.TF_FLOAT); + + var tensors1 = model.predict(tensorInput); + + Assert.AreEqual((1, 8, 8, 4), tensors1.shape); + + model = null; + keras.backend.clear_session(); + + var model2 = tf.keras.models.load_model(@"Assets/concat_axis3_model"); + + var tensors2 = model2.predict(tensorInput); + + Assert.AreEqual(tensors1.shape, tensors2.shape); + } + } From ec4f372a29b5cbc5fe6c0d6b8414ddb48c22e548 Mon Sep 17 00:00:00 2001 From: dogvane Date: Mon, 16 Oct 2023 11:22:58 +0800 Subject: [PATCH 141/182] add relu6 --- src/TensorFlowNET.Core/APIs/tf.nn.cs | 5 ++++ .../Keras/Activations/Activations.cs | 1 + .../Keras/Layers/ILayersApi.cs | 3 +++ src/TensorFlowNET.Keras/Activations.cs | 7 ++++++ .../Layers/Activation/ReLu6.cs | 25 +++++++++++++++++++ src/TensorFlowNET.Keras/Layers/LayersApi.cs | 9 +++++++ 6 files changed, 50 insertions(+) create mode 100644 src/TensorFlowNET.Keras/Layers/Activation/ReLu6.cs diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs index 397c68c7c..112c48628 100644 --- a/src/TensorFlowNET.Core/APIs/tf.nn.cs +++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs @@ -101,6 +101,8 @@ public Tensor embedding_lookup(Tensor @params, name: name); public IActivation relu() => new relu(); + + public IActivation swish() => new swish(); public IActivation tanh() => new tanh(); @@ -111,6 +113,9 @@ public Tensor tanh(Tensor x, string name = null) public Tensor relu(Tensor features, string name = null) => gen_nn_ops.relu(features, name); + public Tensor relu6(Tensor features, string name = null) + => gen_nn_ops.relu6(features, name); + public Tensor[] fused_batch_norm(Tensor x, Tensor scale, Tensor offset, diff --git a/src/TensorFlowNET.Core/Keras/Activations/Activations.cs b/src/TensorFlowNET.Core/Keras/Activations/Activations.cs index f0d59ed62..37264104a 100644 --- a/src/TensorFlowNET.Core/Keras/Activations/Activations.cs +++ b/src/TensorFlowNET.Core/Keras/Activations/Activations.cs @@ -32,6 +32,7 @@ public interface IActivationsApi Activation Linear { get; } Activation Relu { get; } + Activation Relu6 { get; } Activation Sigmoid { get; } diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs index 3fd98e7a8..57273eb08 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs @@ -180,6 +180,9 @@ public ILayer LayerNormalization(Axis? axis, public ILayer Normalization(Shape? input_shape = null, int? axis = -1, float? mean = null, float? 
variance = null, bool invert = false); public ILayer LeakyReLU(float alpha = 0.3f); + public ILayer ReLU6(); + + public IRnnCell LSTMCell(int uints, string activation = "tanh", string recurrent_activation = "sigmoid", diff --git a/src/TensorFlowNET.Keras/Activations.cs b/src/TensorFlowNET.Keras/Activations.cs index ce5b4eb13..d3801902f 100644 --- a/src/TensorFlowNET.Keras/Activations.cs +++ b/src/TensorFlowNET.Keras/Activations.cs @@ -20,6 +20,11 @@ public class Activations: IActivationsApi Name = "relu", ActivationFunction = (features, name) => tf.Context.ExecuteOp("Relu", name, new ExecuteOpArgs(features)) }; + private static Activation _relu6 = new Activation() + { + Name = "relu6", + ActivationFunction = (features, name) => tf.Context.ExecuteOp("Relu6", name, new ExecuteOpArgs(features)) + }; private static Activation _sigmoid = new Activation() { Name = "sigmoid", @@ -55,6 +60,7 @@ static Activations() _nameActivationMap = new Dictionary(); RegisterActivation(_relu); + RegisterActivation(_relu6); RegisterActivation(_linear); RegisterActivation(_sigmoid); RegisterActivation(_softmax); @@ -65,6 +71,7 @@ static Activations() public Activation Linear => _linear; public Activation Relu => _relu; + public Activation Relu6 => _relu6; public Activation Sigmoid => _sigmoid; diff --git a/src/TensorFlowNET.Keras/Layers/Activation/ReLu6.cs b/src/TensorFlowNET.Keras/Layers/Activation/ReLu6.cs new file mode 100644 index 000000000..5af3f7677 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Activation/ReLu6.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Keras.ArgsDefinition; +using Tensorflow.Keras.Engine; +using Tensorflow.Common.Types; +using static Tensorflow.Binding; + +namespace Tensorflow.Keras.Layers +{ + /// + /// Rectified Linear Unit activation capped at a maximum value of 6. + /// + public class ReLu6 : Layer + { + public ReLu6() : base(new LayerArgs { }) + { + } + + protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) + { + return tf.nn.relu6(inputs); + } + } +} diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index bcc19dc22..e2adb23d0 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -735,6 +735,15 @@ public ILayer LeakyReLU(float alpha = 0.3f) }); + /// + /// Rectified Linear Unit activation capped at a maximum value of 6. + /// + /// Computes min(max(features, 0), 6) element-wise.
+ /// + public ILayer ReLU6() + => new ReLu6(); + + public IRnnCell SimpleRNNCell( int units, string activation = "tanh", From eb4ff88d39160e6046e43fe5e7453ea3e1abeac4 Mon Sep 17 00:00:00 2001 From: SMURF Date: Wed, 18 Oct 2023 23:34:15 +0100 Subject: [PATCH 142/182] fix: Saving a loaded model --- src/TensorFlowNET.Keras/Engine/Layer.Serialize.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Keras/Engine/Layer.Serialize.cs b/src/TensorFlowNET.Keras/Engine/Layer.Serialize.cs index ed5c2de0a..49811417e 100644 --- a/src/TensorFlowNET.Keras/Engine/Layer.Serialize.cs +++ b/src/TensorFlowNET.Keras/Engine/Layer.Serialize.cs @@ -27,6 +27,6 @@ public override IDictionary _trackable_children(SaveType save children = new Dictionary(); } - return children.Concat(base._trackable_children(save_type, cache)).ToDictionary(x => x.Key, x => x.Value); + return children.Concat(base._trackable_children(save_type, cache)).GroupBy(x => x.Key).Select(g => g.First()).ToDictionary(x => x.Key, x => x.Value); } } \ No newline at end of file From a73694ab2db42b2a4ea560c6bbb36ed9175fc5fb Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Fri, 20 Oct 2023 11:24:27 +0800 Subject: [PATCH 143/182] fix: add the implementation of the tile's grad --- .../Gradients/array_grad.cs | 24 +++++++++++++++++++ .../Operations/array_ops.cs | 2 +- .../GradientTest/GradientEagerTest.cs | 14 +++++++++++ 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Core/Gradients/array_grad.cs b/src/TensorFlowNET.Core/Gradients/array_grad.cs index 4b7027992..016e4f029 100644 --- a/src/TensorFlowNET.Core/Gradients/array_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/array_grad.cs @@ -381,5 +381,29 @@ public static Tensor[] _ReverseV2Grad(Operation op, Tensor[] grads) var axis = op.inputs[1]; return new Tensor[] { array_ops.reverse(grad, axis), null }; } + + [RegisterGradient("Tile")] + public static Tensor[] _TileGrad(Operation op, Tensor[] grads) + { + var grad = grads[0]; + var input_shape = array_ops.shape(op.inputs[0], out_type: op.inputs[1].dtype); + var split_shape = array_ops.reshape(array_ops.transpose(array_ops.stack(new Tensor[] { op.inputs[1], input_shape })), new Shape(-1)); + var axes = math_ops.range(0, array_ops.size(split_shape), 2); + + //# Sum reduces grad along the first dimension for IndexedSlices + //if isinstance(grad, indexed_slices_lib.IndexedSlices): + //input_shape_0 = math_ops.cast(input_shape[0], grad.indices.dtype) + //grad = math_ops.unsorted_segment_sum( + // grad.values, math_ops.mod(grad.indices, input_shape_0), input_shape_0) + //split_shape = array_ops.concat([[1], split_shape[1:]], axis = 0) + + var input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes); + if (!tf.Context.executing_eagerly()) + { + input_grad.set_shape(op.inputs[0].GetShape()); + } + return new Tensor[] { input_grad, null }; + + } } } diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index fdc53cd7e..abf44c643 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -990,7 +990,7 @@ public static Tensor gather(ResourceVariable @params, Tensor indices, string nam return @params.sparse_read(indices, name); } - public static Tensor transpose(T1 a, Axis perm, string name = "transpose", bool conjugate = false) + public static Tensor transpose(T1 a, Axis perm = null, string name = "transpose", bool conjugate = false) { return 
tf_with(ops.name_scope(name, "transpose", new { a }), scope => { diff --git a/test/TensorFlowNET.UnitTest/GradientTest/GradientEagerTest.cs b/test/TensorFlowNET.UnitTest/GradientTest/GradientEagerTest.cs index e41e1d617..ed7599045 100644 --- a/test/TensorFlowNET.UnitTest/GradientTest/GradientEagerTest.cs +++ b/test/TensorFlowNET.UnitTest/GradientTest/GradientEagerTest.cs @@ -173,5 +173,19 @@ public void ConditionalMultiply() var result = grad(x, 4); Assert.AreEqual((float)result, 4.0f); } + + [TestMethod] + public void Tile() + { + var a = tf.constant(new int[] { 1 }, TF_DataType.TF_FLOAT); + var b = tf.constant(new int[] { 2 }); + using (var tape = tf.GradientTape()) + { + tape.watch(a); + var y = tf.tile(a, b); + var grad = tape.gradient(y, a); + Assert.AreEqual((float)grad.numpy(), 2.0f); + } + } } } From 3fcc4d8d1540c7c01ce4ca05ea883874abd4e5e5 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Fri, 20 Oct 2023 11:30:33 +0800 Subject: [PATCH 144/182] fix: add the GRU, LSTM, SimpleRNN's OptionalArgs --- .../Keras/ArgsDefinition/Rnn/GRUOptionalArgs.cs | 4 +--- .../Keras/ArgsDefinition/Rnn/LSTMOptionalArgs.cs | 11 +++++++++++ .../Keras/ArgsDefinition/Rnn/SimpleRNNOptionalArgs.cs | 11 +++++++++++ 3 files changed, 23 insertions(+), 3 deletions(-) create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMOptionalArgs.cs create mode 100644 src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNOptionalArgs.cs diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUOptionalArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUOptionalArgs.cs index d441dc828..1d215576f 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUOptionalArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUOptionalArgs.cs @@ -4,10 +4,8 @@ namespace Tensorflow.Keras.ArgsDefinition { - public class GRUOptionalArgs + public class GRUOptionalArgs : RnnOptionalArgs { public string Identifier => "GRU"; - - public Tensor Mask { get; set; } = null; } } diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMOptionalArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMOptionalArgs.cs new file mode 100644 index 000000000..2829927c3 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMOptionalArgs.cs @@ -0,0 +1,11 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition.Rnn +{ + public class LSTMOptionalArgs : RnnOptionalArgs + { + public string Identifier => "LSTM"; + } +} diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNOptionalArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNOptionalArgs.cs new file mode 100644 index 000000000..a8b8caf06 --- /dev/null +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNOptionalArgs.cs @@ -0,0 +1,11 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.ArgsDefinition.Rnn +{ + public class SimpleRNNOptionalArgs : RnnOptionalArgs + { + public string Identifier => "SimpleRNN"; + } +} From d0ec6591a0cc0ea3325a7fc723435b23eabc757b Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Fri, 20 Oct 2023 15:40:35 +0800 Subject: [PATCH 145/182] fix: add the implementation of GatherND's grad --- src/TensorFlowNET.Core/APIs/tf.array.cs | 10 ++++++++++ .../Gradients/array_grad.cs | 19 +++++++++++++++++++ .../Operations/array_ops.cs | 2 +- .../GradientTest/GradientEagerTest.cs | 17 ++++++++++++++++- 4 files changed, 46 
insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.array.cs b/src/TensorFlowNET.Core/APIs/tf.array.cs index 4d9c3da58..b529cd319 100644 --- a/src/TensorFlowNET.Core/APIs/tf.array.cs +++ b/src/TensorFlowNET.Core/APIs/tf.array.cs @@ -140,6 +140,16 @@ public Tensor identity(Tensor input, string name = null) public Tensor gather(Tensor @params, Tensor indices, string name = null, int axis = 0) => array_ops.gather(@params, indices, name: name, axis: ops.convert_to_tensor(axis)); + /// + /// Gather slices from `params` into a Tensor with shape specified by `indices`. + /// + /// + /// + /// + /// + public Tensor gather_nd(Tensor @params, Tensor indices, string name = null) + => gen_array_ops.gather_nd(@params, indices, name: name); + /// /// Return the elements, either from `x` or `y`, depending on the `condition`. /// diff --git a/src/TensorFlowNET.Core/Gradients/array_grad.cs b/src/TensorFlowNET.Core/Gradients/array_grad.cs index 016e4f029..a4da60eed 100644 --- a/src/TensorFlowNET.Core/Gradients/array_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/array_grad.cs @@ -403,7 +403,26 @@ public static Tensor[] _TileGrad(Operation op, Tensor[] grads) input_grad.set_shape(op.inputs[0].GetShape()); } return new Tensor[] { input_grad, null }; + } + [RegisterGradient("GatherNd")] + public static Tensor[] _GatherNdGrad(Operation op, Tensor[] grads) + { + var @ref = op.inputs[0]; + var indices = op.inputs[1]; + var grad = grads[0]; + var ref_shape = array_ops.shape(@ref, out_type: indices.dtype); + Tensor ref_grad = null; + if (indices.shape.ndim == 2 && indices.shape.dims[indices.shape.Length - 1] == 1) + { + ref_grad = (Tensor)new IndexedSlices(grad, array_ops.squeeze(indices, axis: -1), ref_shape); + } + else + { + ref_grad = gen_array_ops.scatter_nd(indices, grad, ref_shape); + } + return new Tensor[] { ref_grad, null }; } + } } diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index abf44c643..57af3b835 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -829,7 +829,7 @@ public static Tensor strided_slice_grad(Tensor shape, Tensor begin, Tensor end, /// A `Tensor`. Has the same type as `input`. /// Contains the same data as `input`, but has one or more dimensions of /// size 1 removed. 
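For context on the signature change just below: the _GatherNdGrad registration above passes array_ops.squeeze(indices, axis: -1) into an IndexedSlices for the rank-2, single-column index case, and a negative scalar axis cannot be expressed with the old int[] parameter, which is why squeeze is widened to accept an Axis. A minimal sketch of that call pattern, illustrative only: the variable names are made up here, and it assumes the implicit int-to-Axis conversion the binding already provides.

    // index tensor with a trailing unit dimension, shape (3, 1)
    var indices = tf.constant(new int[,] { { 0 }, { 1 }, { 2 } });
    // drop the size-1 axis so each entry addresses a row directly, shape (3,)
    var rows = array_ops.squeeze(indices, axis: -1);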
- public static Tensor squeeze(Tensor input, int[] axis = null, string name = null) + public static Tensor squeeze(Tensor input, Axis axis = null, string name = null) => gen_array_ops.squeeze(input, axis, name); public static Tensor identity(Tensor input, string name = null) diff --git a/test/TensorFlowNET.UnitTest/GradientTest/GradientEagerTest.cs b/test/TensorFlowNET.UnitTest/GradientTest/GradientEagerTest.cs index ed7599045..1cfceb3e3 100644 --- a/test/TensorFlowNET.UnitTest/GradientTest/GradientEagerTest.cs +++ b/test/TensorFlowNET.UnitTest/GradientTest/GradientEagerTest.cs @@ -62,7 +62,7 @@ public void SquaredDifference_1D() // Calcute the gradient of (x1-x2)^2 // by Automatic Differentiation in Eager mode // Expected is 2*(abs(x1-x2)) - Tensor x1 = new NDArray( new float[] { 1, 3, 5, 21, 19, 17 }); + Tensor x1 = new NDArray(new float[] { 1, 3, 5, 21, 19, 17 }); Tensor x2 = new NDArray(new float[] { 29, 27, 23, 7, 11, 13 }); float[] expected = new float[] { @@ -187,5 +187,20 @@ public void Tile() Assert.AreEqual((float)grad.numpy(), 2.0f); } } + + [TestMethod] + public void GatherNdTest() + { + var x = tf.constant(new float[,] { { 1.0f, 2.0f, 3.0f }, { 1.0f, 2.0f, 3.0f }, { 1.0f, 2.0f, 3.0f } }, dtype: TF_DataType.TF_FLOAT); + var indices = tf.constant(new int[,] { { 0, 1 }, { 1, 1 }, { 2, 1 } }, dtype: TF_DataType.TF_INT32); + using (var tape = tf.GradientTape()) + { + tape.watch(x); + var res = tf.gather_nd(x, indices); + var grad = tape.gradient(res, x); + var expected = np.array(new float[,] { { 0f, 1f, 0f }, { 0f, 1f, 0f }, { 0f, 1f, 0f } }); + Assert.IsTrue(Enumerable.SequenceEqual(grad.ToArray(), expected.ToArray())); + } + } } } From 4e42d7f3a8ee574caf9c3896bb6438e88cbab211 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Sat, 4 Nov 2023 10:18:50 +0800 Subject: [PATCH 146/182] fix: fix the bug of boolean_mask --- src/TensorFlowNET.Core/Operations/NnOps/rnn.cs | 4 ++-- src/TensorFlowNET.Core/Operations/array_ops.cs | 13 +++++++++---- src/TensorFlowNET.Core/Operations/nn_ops.cs | 2 +- .../Basics/TensorTest.cs | 7 ++++--- 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs index 6b9f073c1..55f139207 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs @@ -428,9 +428,9 @@ public static Tensor _transpose_batch_time(Tensor x) return x; var x_rank = array_ops.rank(x); - var con1 = new object[] + var con1 = new Tensor[] { - new []{1, 0 }, + new Tensor(new int[]{0, 2}), math_ops.range(2, x_rank) }; var x_t = array_ops.transpose(x, array_ops.concat(con1, 0)); diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index 57af3b835..1b424006d 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -166,6 +166,11 @@ public static Tensor boolean_mask(T1 tensor, T2 mask, string name = "boo throw new ValueError("mask cannot be scalar."); var leading_size = gen_math_ops.prod(shape(tensor_tensor)[$"{axis}:{axis + ndims_mask}"], ops.convert_to_tensor(new[] { 0 })); + if (leading_size.rank == 0) + { + leading_size = expand_dims(leading_size, 0); + } + var shape1 = concat(new[] { shape(tensor_tensor)[$":{axis}"], @@ -185,7 +190,7 @@ public static Tensor boolean_mask(T1 tensor, T2 mask, string name = "boo private static Tensor _apply_mask_1d(Tensor reshaped_tensor, Tensor mask, int axis = 0) { - var 
indices = squeeze(where(mask), axis: new[] { 1 }); + var indices = squeeze(where_v2(mask), axis: new[] { 1 }); return gather(reshaped_tensor, indices, axis: ops.convert_to_tensor(axis)); } @@ -940,12 +945,12 @@ public static Tensor broadcast_static_shape(Tensor shape_x, Tensor shape_y) /// public static Tensor concat(Tensor[] values, Tensor axis, string name = "concat") { - return tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis)); + return gen_array_ops.concat_v2(values, axis, name: name); } - public static Tensor concat(object[] values, int axis, string name = "concat") + public static Tensor concat(Tensor[] values, Axis axis, string name = "concat") { - return tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis)); + return gen_array_ops.concat_v2(values, axis, name: name); } /// diff --git a/src/TensorFlowNET.Core/Operations/nn_ops.cs b/src/TensorFlowNET.Core/Operations/nn_ops.cs index 00d7d316b..394a591ab 100644 --- a/src/TensorFlowNET.Core/Operations/nn_ops.cs +++ b/src/TensorFlowNET.Core/Operations/nn_ops.cs @@ -287,7 +287,7 @@ private static Tensor _flatten_outer_dims(Tensor logits) new[] { math_ops.subtract(rank, 1) }, new[] { constant_op.constant(1) }); - var ops = array_ops.concat(new[] { new[] { -1 }, (object)last_dim_size }, 0); + var ops = array_ops.concat(new Tensor[] { new Tensor(new int[] {1}), last_dim_size }, 0); var output = array_ops.reshape(logits, ops); // Set output shape if known. diff --git a/test/TensorFlowNET.Graph.UnitTest/Basics/TensorTest.cs b/test/TensorFlowNET.Graph.UnitTest/Basics/TensorTest.cs index 90de78743..8093c1f23 100644 --- a/test/TensorFlowNET.Graph.UnitTest/Basics/TensorTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/Basics/TensorTest.cs @@ -3,6 +3,7 @@ using System; using System.Linq; using static Tensorflow.Binding; +using Tensorflow; namespace TensorFlowNET.UnitTest.Basics { @@ -60,14 +61,14 @@ public void batch_to_space_nd() Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 15, 21, 16, 22, 17, 23 }, result[0, 3].ToArray())); } - [TestMethod, Ignore] + [TestMethod] public void boolean_mask() { + if (!tf.executing_eagerly()) + tf.enable_eager_execution(); var tensor = new[] { 0, 1, 2, 3 }; var mask = np.array(new[] { true, false, true, false }); var masked = tf.boolean_mask(tensor, mask); - var sess = tf.Session(); - var result = sess.run(masked); Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 0, 2 }, masked.ToArray())); } } From f721baee711cc79a5270e72d73acb475ed4abaf0 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Sun, 5 Nov 2023 14:05:41 +0800 Subject: [PATCH 147/182] test: add the concat_v2 test --- .../TensorFlow.Kernel.UnitTest.csproj | 24 +++++++ .../array_ops/concat_op_test.cs | 65 +++++++++++++++++++ TensorFlow.NET.sln | 21 ++++++ 3 files changed, 110 insertions(+) create mode 100644 TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj create mode 100644 TensorFlow.Kernel.UnitTest/array_ops/concat_op_test.cs diff --git a/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj b/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj new file mode 100644 index 000000000..a52a4cda6 --- /dev/null +++ b/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj @@ -0,0 +1,24 @@ + + + + net6.0 + enable + enable + + false + true + + + + + + + + + + + + + + + diff --git a/TensorFlow.Kernel.UnitTest/array_ops/concat_op_test.cs b/TensorFlow.Kernel.UnitTest/array_ops/concat_op_test.cs new file mode 100644 index 000000000..cfa8f0fbf --- /dev/null +++ 
b/TensorFlow.Kernel.UnitTest/array_ops/concat_op_test.cs @@ -0,0 +1,65 @@ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Tensorflow; +using Tensorflow.NumPy; +using TensorFlow; +using static Tensorflow.Binding; +using static Tensorflow.KerasApi; + +namespace TensorFlow.Kernel.UnitTest +{ + [TestClass] + public class concat_op_test + { + [TestMethod] + public void testConcatEmpty() + { + var t1 = tf.constant(new int[] { }); + var t2 = tf.constant(new int[] { }); + var c = array_ops.concat(new[] { t1, t2 }, 0); + var expected = np.array(new int[] { }); + Assert.IsTrue(Enumerable.SequenceEqual(expected.ToArray(), c.numpy().ToArray())); + } + + [TestMethod] + public void testConcatNegativeAxis() + { + var t1 = tf.constant(new int[,] {{ 1, 2, 3 }, { 4, 5, 6 } }); + var t2 = tf.constant(new int[,] { { 7, 8, 9 }, { 10, 11, 12 } }); + var c = array_ops.concat(new[] { t1, t2 }, -2); + var expected = np.array(new int[,,] { { { 1, 2, 3 }, { 4, 5, 6 } }, { { 7, 8, 9 }, { 10, 11, 12 } } }); + Assert.IsTrue(Enumerable.SequenceEqual(expected.ToArray(), c.numpy().ToArray())); + + c = array_ops.concat(new[] { t1, t2 }, -1); + expected = np.array(new int[,] { { 1, 2, 3, 7, 8, 9 }, { 4, 5, 6, 10, 11, 12 } }); + Assert.IsTrue(Enumerable.SequenceEqual(expected.ToArray(), c.numpy().ToArray())); + } + + [TestMethod] + [DataRow(TF_DataType.TF_INT32)] + [DataRow(TF_DataType.TF_INT64)] + [DataRow(TF_DataType.TF_UINT32)] + [DataRow(TF_DataType.TF_UINT64)] + public void testConcatDtype(TF_DataType dtype) + { + var t1 = tf.constant(new int[,] { { 1, 2, 3 }, { 4, 5, 6 } }, dtype: dtype); + var t2 = tf.constant(new int[,] { { 7, 8, 9 }, { 10, 11, 12 } }, dtype: dtype); + var c = array_ops.concat(new[] { t1, t2 }, 1); + var expected = np.array(new int[,] { { 1, 2, 3, 7, 8, 9 }, { 4, 5, 6, 10, 11, 12 } }); + Assert.IsTrue(Enumerable.SequenceEqual(expected.ToArray(), tf.cast(c, TF_DataType.TF_INT32).numpy().ToArray())); + + } + + [TestMethod] + [DataRow(TF_DataType.TF_INT32)] + [DataRow(TF_DataType.TF_INT64)] + public void testConcatAxisType(TF_DataType dtype) + { + var t1 = tf.constant(new int[,] { { 1, 2, 3 }, {4, 5, 6 } }); + var t2 = tf.constant(new int[,] { { 7, 8, 9 }, { 10, 11, 12 } }); + var c = array_ops.concat(new[] { t1, t2 }, tf.constant(1, dtype: dtype)); + var expected = np.array(new int[,] { { 1, 2, 3, 7, 8, 9 }, { 4, 5, 6, 10, 11, 12 } }); + Assert.IsTrue(Enumerable.SequenceEqual(expected.ToArray(), tf.cast(c, TF_DataType.TF_INT32).numpy().ToArray())); + } + + } +} diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln index 87729e27d..a246407b0 100644 --- a/TensorFlow.NET.sln +++ b/TensorFlow.NET.sln @@ -39,6 +39,8 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "too EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Console", "tools\TensorFlowNET.Console\Tensorflow.Console.csproj", "{1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}" EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlow.Kernel.UnitTest", "TensorFlow.Kernel.UnitTest\TensorFlow.Kernel.UnitTest.csproj", "{C08C6692-4818-46C1-8462-2F0CC40C9152}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -322,6 +324,24 @@ Global {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x64.Build.0 = Release|x64 {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x86.ActiveCfg = Release|Any CPU {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x86.Build.0 = Release|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.Debug|Any CPU.ActiveCfg 
= Debug|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.Debug|x64.ActiveCfg = Debug|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.Debug|x64.Build.0 = Debug|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.Debug|x86.ActiveCfg = Debug|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.Debug|x86.Build.0 = Debug|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.GPU|Any CPU.Build.0 = Debug|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.GPU|x64.ActiveCfg = Debug|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.GPU|x64.Build.0 = Debug|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.GPU|x86.ActiveCfg = Debug|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.GPU|x86.Build.0 = Debug|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.Release|Any CPU.Build.0 = Release|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.Release|x64.ActiveCfg = Release|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.Release|x64.Build.0 = Release|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.Release|x86.ActiveCfg = Release|Any CPU + {C08C6692-4818-46C1-8462-2F0CC40C9152}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -342,6 +362,7 @@ Global {D24FCAA5-548C-4251-B226-A1B6535D0845} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} {C23563DB-FE21-48E7-A411-87A109E4A899} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} + {C08C6692-4818-46C1-8462-2F0CC40C9152} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A} From 8c06bbb0169f4c96c5c17bdd5fcbf07557665d03 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Sun, 5 Nov 2023 20:47:58 +0800 Subject: [PATCH 148/182] fix: fix the bug caused by concat_v2 --- src/TensorFlowNET.Core/Operations/NnOps/rnn.cs | 4 ++-- src/TensorFlowNET.Core/Operations/array_ops.cs | 6 +++--- src/TensorFlowNET.Core/Operations/nn_ops.cs | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs index 55f139207..6b9f073c1 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs @@ -428,9 +428,9 @@ public static Tensor _transpose_batch_time(Tensor x) return x; var x_rank = array_ops.rank(x); - var con1 = new Tensor[] + var con1 = new object[] { - new Tensor(new int[]{0, 2}), + new []{1, 0 }, math_ops.range(2, x_rank) }; var x_t = array_ops.transpose(x, array_ops.concat(con1, 0)); diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index 1b424006d..548a885ed 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -945,12 +945,12 @@ public static Tensor broadcast_static_shape(Tensor shape_x, Tensor shape_y) /// public static Tensor concat(Tensor[] values, Tensor axis, string name = "concat") { - return gen_array_ops.concat_v2(values, axis, name: name); + return tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis)); } - public static Tensor concat(Tensor[] values, Axis axis, string name = "concat") + public 
static Tensor concat(object[] values, int axis, string name = "concat") { - return gen_array_ops.concat_v2(values, axis, name: name); + return tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis)); } /// diff --git a/src/TensorFlowNET.Core/Operations/nn_ops.cs b/src/TensorFlowNET.Core/Operations/nn_ops.cs index 394a591ab..00d7d316b 100644 --- a/src/TensorFlowNET.Core/Operations/nn_ops.cs +++ b/src/TensorFlowNET.Core/Operations/nn_ops.cs @@ -287,7 +287,7 @@ private static Tensor _flatten_outer_dims(Tensor logits) new[] { math_ops.subtract(rank, 1) }, new[] { constant_op.constant(1) }); - var ops = array_ops.concat(new Tensor[] { new Tensor(new int[] {1}), last_dim_size }, 0); + var ops = array_ops.concat(new[] { new[] { -1 }, (object)last_dim_size }, 0); var output = array_ops.reshape(logits, ops); // Set output shape if known. From 7fd455041d85dc4143a4a6e4d876b9c22be51f51 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Sun, 5 Nov 2023 21:51:33 +0800 Subject: [PATCH 149/182] refactor: refacter the place of the kernel unittest folder --- TensorFlow.NET.sln | 40 +++++++++---------- .../TensorFlow.Kernel.UnitTest.csproj | 4 +- .../array_ops/concat_op_test.cs | 10 ++--- 3 files changed, 26 insertions(+), 28 deletions(-) rename {TensorFlow.Kernel.UnitTest => test/TensorFlow.Kernel.UnitTest}/TensorFlow.Kernel.UnitTest.csproj (74%) rename {TensorFlow.Kernel.UnitTest => test/TensorFlow.Kernel.UnitTest}/array_ops/concat_op_test.cs (89%) diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln index a246407b0..214b039d4 100644 --- a/TensorFlow.NET.sln +++ b/TensorFlow.NET.sln @@ -39,7 +39,7 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "too EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Console", "tools\TensorFlowNET.Console\Tensorflow.Console.csproj", "{1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlow.Kernel.UnitTest", "TensorFlow.Kernel.UnitTest\TensorFlow.Kernel.UnitTest.csproj", "{C08C6692-4818-46C1-8462-2F0CC40C9152}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlow.Kernel.UnitTest", "test\TensorFlow.Kernel.UnitTest\TensorFlow.Kernel.UnitTest.csproj", "{654A027D-1364-4729-880B-144DFE1FF5BB}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -324,24 +324,24 @@ Global {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x64.Build.0 = Release|x64 {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x86.ActiveCfg = Release|Any CPU {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x86.Build.0 = Release|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.Debug|Any CPU.Build.0 = Debug|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.Debug|x64.ActiveCfg = Debug|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.Debug|x64.Build.0 = Debug|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.Debug|x86.ActiveCfg = Debug|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.Debug|x86.Build.0 = Debug|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.GPU|Any CPU.ActiveCfg = Debug|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.GPU|Any CPU.Build.0 = Debug|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.GPU|x64.ActiveCfg = Debug|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.GPU|x64.Build.0 = Debug|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.GPU|x86.ActiveCfg = Debug|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.GPU|x86.Build.0 = Debug|Any 
CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.Release|Any CPU.ActiveCfg = Release|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.Release|Any CPU.Build.0 = Release|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.Release|x64.ActiveCfg = Release|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.Release|x64.Build.0 = Release|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.Release|x86.ActiveCfg = Release|Any CPU - {C08C6692-4818-46C1-8462-2F0CC40C9152}.Release|x86.Build.0 = Release|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|Any CPU.Build.0 = Debug|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x64.ActiveCfg = Debug|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x64.Build.0 = Debug|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x86.ActiveCfg = Debug|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x86.Build.0 = Debug|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|Any CPU.Build.0 = Debug|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x64.ActiveCfg = Debug|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x64.Build.0 = Debug|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x86.ActiveCfg = Debug|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x86.Build.0 = Debug|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|Any CPU.ActiveCfg = Release|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|Any CPU.Build.0 = Release|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x64.ActiveCfg = Release|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x64.Build.0 = Release|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x86.ActiveCfg = Release|Any CPU + {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -362,7 +362,7 @@ Global {D24FCAA5-548C-4251-B226-A1B6535D0845} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} {C23563DB-FE21-48E7-A411-87A109E4A899} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} - {C08C6692-4818-46C1-8462-2F0CC40C9152} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD} + {654A027D-1364-4729-880B-144DFE1FF5BB} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A} diff --git a/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj b/test/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj similarity index 74% rename from TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj rename to test/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj index a52a4cda6..68eb9e9b2 100644 --- a/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj +++ b/test/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj @@ -17,8 +17,8 @@ - - + + diff --git a/TensorFlow.Kernel.UnitTest/array_ops/concat_op_test.cs b/test/TensorFlow.Kernel.UnitTest/array_ops/concat_op_test.cs similarity index 89% rename from TensorFlow.Kernel.UnitTest/array_ops/concat_op_test.cs rename to test/TensorFlow.Kernel.UnitTest/array_ops/concat_op_test.cs index cfa8f0fbf..67d0aa602 100644 --- a/TensorFlow.Kernel.UnitTest/array_ops/concat_op_test.cs +++ b/test/TensorFlow.Kernel.UnitTest/array_ops/concat_op_test.cs @@ -1,9 +1,7 @@ using 
Microsoft.VisualStudio.TestTools.UnitTesting; using Tensorflow; using Tensorflow.NumPy; -using TensorFlow; using static Tensorflow.Binding; -using static Tensorflow.KerasApi; namespace TensorFlow.Kernel.UnitTest { @@ -23,14 +21,14 @@ public void testConcatEmpty() [TestMethod] public void testConcatNegativeAxis() { - var t1 = tf.constant(new int[,] {{ 1, 2, 3 }, { 4, 5, 6 } }); + var t1 = tf.constant(new int[,] { { 1, 2, 3 }, { 4, 5, 6 } }); var t2 = tf.constant(new int[,] { { 7, 8, 9 }, { 10, 11, 12 } }); var c = array_ops.concat(new[] { t1, t2 }, -2); var expected = np.array(new int[,,] { { { 1, 2, 3 }, { 4, 5, 6 } }, { { 7, 8, 9 }, { 10, 11, 12 } } }); Assert.IsTrue(Enumerable.SequenceEqual(expected.ToArray(), c.numpy().ToArray())); c = array_ops.concat(new[] { t1, t2 }, -1); - expected = np.array(new int[,] { { 1, 2, 3, 7, 8, 9 }, { 4, 5, 6, 10, 11, 12 } }); + expected = np.array(new int[,] { { 1, 2, 3, 7, 8, 9 }, { 4, 5, 6, 10, 11, 12 } }); Assert.IsTrue(Enumerable.SequenceEqual(expected.ToArray(), c.numpy().ToArray())); } @@ -54,7 +52,7 @@ public void testConcatDtype(TF_DataType dtype) [DataRow(TF_DataType.TF_INT64)] public void testConcatAxisType(TF_DataType dtype) { - var t1 = tf.constant(new int[,] { { 1, 2, 3 }, {4, 5, 6 } }); + var t1 = tf.constant(new int[,] { { 1, 2, 3 }, { 4, 5, 6 } }); var t2 = tf.constant(new int[,] { { 7, 8, 9 }, { 10, 11, 12 } }); var c = array_ops.concat(new[] { t1, t2 }, tf.constant(1, dtype: dtype)); var expected = np.array(new int[,] { { 1, 2, 3, 7, 8, 9 }, { 4, 5, 6, 10, 11, 12 } }); @@ -62,4 +60,4 @@ public void testConcatAxisType(TF_DataType dtype) } } -} +} \ No newline at end of file From 7f0161445d1142f18ca2e18504e25fcad15e1d44 Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Sun, 5 Nov 2023 21:54:56 +0800 Subject: [PATCH 150/182] fix: fix a project reference mistake --- .../TensorFlow.Kernel.UnitTest.csproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj b/test/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj index 68eb9e9b2..21b2731b7 100644 --- a/test/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj +++ b/test/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj @@ -17,8 +17,8 @@ + - From 94c0bb8796a06a4becb21687141f2a4451c9230e Mon Sep 17 00:00:00 2001 From: Haiping Chen Date: Sun, 5 Nov 2023 15:02:16 -0600 Subject: [PATCH 151/182] Release v0.150.0 based on tensorflowv v2.15.0. 
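This release aligns the managed binding with the TensorFlow 2.15.0 native library: Tensorflow.Binding moves to 0.150.0 and Tensorflow.Keras to 0.15.0, the README now points macOS users at the SciSharp.TensorFlow.Redist-OSX package, and the custom C API shims drop their TFC_ prefix. A minimal sketch of the renamed P/Invoke calls as they appear in the hunks below, illustrative only: the surrounding variables come from Operation.cs and ops.cs, and the signatures are otherwise unchanged.

    // previously c_api.TFC_SetAttr(...)
    c_api.TF_SetAttr(graph, _handle, attr_name, attr_buf, status);
    // previously c_api.TFC_GetHandleShapeAndType(...)
    var handle_data = c_api.TF_GetHandleShapeAndType(graph_op.graph.c_graph, graph_op._as_tf_output());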
--- README.md | 19 ++++--------------- .../APIs/c_api.customize.cs | 6 +++--- .../Operations/Operation.cs | 2 +- .../Operations/handle_data_util.cs | 2 +- .../Tensorflow.Binding.csproj | 14 +++++++++----- src/TensorFlowNET.Core/ops.cs | 2 +- .../Tensorflow.Keras.csproj | 9 +++++---- src/TensorflowNET.Hub/Tensorflow.Hub.csproj | 2 +- .../Tensorflow.Console.csproj | 5 +---- .../Tensorflow.CodeGen.csproj | 1 - .../Tensorflow.UnitTest.RedistHolder.csproj | 2 +- 11 files changed, 27 insertions(+), 37 deletions(-) diff --git a/README.md b/README.md index 36ec1660c..0198c873c 100644 --- a/README.md +++ b/README.md @@ -15,20 +15,6 @@ English | [中文](docs/README-CN.md) -**=========================================================** - -### [Voting: Naming Convention Approach of v1.0.0](https://github.com/SciSharp/TensorFlow.NET/issues/1074) - -Dear all, - -We would like to urge you to participate in our upcoming vote regarding the naming convention for TensorFlow.NET version 1.0.0 in [#1074](https://github.com/SciSharp/TensorFlow.NET/issues/1074). Your participation in the vote is essential to help us decide on the best approach for improving the naming convention used in previous versions. - -Thank you, - -TensorFlow.NET Authors - -**=========================================================** - *master branch and v0.100.x is corresponding to tensorflow v2.10, v0.6x branch is from tensorflow v2.6, v0.15-tensorflow1.15 is from tensorflow1.15. Please add `https://www.myget.org/F/scisharp/api/v3/index.json` to nuget source to use nightly release.* @@ -75,9 +61,12 @@ PM> Install-Package TensorFlow.Keras The second part is the computing support part. Only one of the following packages is needed, depending on your device and system. ``` -### CPU version for Windows, Linux and Mac +### CPU version for Windows and Linux PM> Install-Package SciSharp.TensorFlow.Redist +### CPU version for MacOS +PM> Install-Package SciSharp.TensorFlow.Redist-OSX + ### GPU version for Windows (CUDA and cuDNN are required) PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU diff --git a/src/TensorFlowNET.Core/APIs/c_api.customize.cs b/src/TensorFlowNET.Core/APIs/c_api.customize.cs index 510e52eb7..bee4897ee 100644 --- a/src/TensorFlowNET.Core/APIs/c_api.customize.cs +++ b/src/TensorFlowNET.Core/APIs/c_api.customize.cs @@ -8,10 +8,10 @@ namespace Tensorflow public partial class c_api { [DllImport(TensorFlowLibName)] - public static extern void TFC_SetAttr(SafeGraphHandle graph, IntPtr op, string attr_name, SafeBufferHandle attr_value_proto, SafeStatusHandle status); + public static extern void TF_SetAttr(SafeGraphHandle graph, IntPtr op, string attr_name, SafeBufferHandle attr_value_proto, SafeStatusHandle status); [DllImport(TensorFlowLibName)] - public static extern SafeBufferHandle TFC_GetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output); + public static extern SafeBufferHandle TF_GetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output); [DllImport(TensorFlowLibName)] - public static extern void TFC_SetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output, byte[] data, long proto_len, SafeStatusHandle status); + public static extern void TF_SetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output, byte[] data, long proto_len, SafeStatusHandle status); } } diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index e59c381cb..2105c53fa 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ 
b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -437,7 +437,7 @@ internal void _set_attr(string attr_name, AttrValue attr_value) internal void _set_attr_with_buf(string attr_name, Buffer attr_buf) { Status status = new(); - c_api.TFC_SetAttr(graph, _handle, attr_name, attr_buf, status); + c_api.TF_SetAttr(graph, _handle, attr_name, attr_buf, status); status.Check(true); } } diff --git a/src/TensorFlowNET.Core/Operations/handle_data_util.cs b/src/TensorFlowNET.Core/Operations/handle_data_util.cs index a01efc520..363d3144e 100644 --- a/src/TensorFlowNET.Core/Operations/handle_data_util.cs +++ b/src/TensorFlowNET.Core/Operations/handle_data_util.cs @@ -51,7 +51,7 @@ public static void set_handle_data(Tensor target_t, HandleData handle_data) } Status status = new(); var proto = handle_data.ToByteArray(); - c_api.TFC_SetHandleShapeAndType(target_t.graph.c_graph, target_t._as_tf_output(), proto, proto.Length, status); + c_api.TF_SetHandleShapeAndType(target_t.graph.c_graph, target_t._as_tf_output(), proto, proto.Length, status); status.Check(true); } diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj index 85c41bd2a..42c0399da 100644 --- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj +++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj @@ -4,8 +4,8 @@ netstandard2.0;net6.0 Tensorflow.Binding Tensorflow - 2.11.0 - 0.110.4 + 2.15.0 + 0.150.0 10.0 enable Haiping Chen, Eli Belash, Yaohui Liu, Meinrad Recheis @@ -20,8 +20,11 @@ Google's TensorFlow full binding in .NET Standard. Building, training and infering deep learning models. https://tensorflownet.readthedocs.io - 0.110.3.0 + 0.150.0.0 + tf.net 0.150.x and above are based on tensorflow native 2.15.0 + * Support BERT model. + tf.net 0.110.x and above are based on tensorflow native 2.11.0 * Support RNN, LSTM model. * Support Transformer model. @@ -43,8 +46,9 @@ https://tensorflownet.readthedocs.io tf.net 0.7x.x aligns with TensorFlow v2.7.x native library. tf.net 0.10x.x aligns with TensorFlow v2.10.x native library. tf.net 0.11x.x aligns with TensorFlow v2.11.x native library. + tf.net 0.15x.x aligns with TensorFlow v2.15.x native library. - 0.110.4.0 + 0.150.0.0 LICENSE true packages @@ -176,7 +180,7 @@ https://tensorflownet.readthedocs.io - + diff --git a/src/TensorFlowNET.Core/ops.cs b/src/TensorFlowNET.Core/ops.cs index 351fd18ff..6f51150a2 100644 --- a/src/TensorFlowNET.Core/ops.cs +++ b/src/TensorFlowNET.Core/ops.cs @@ -590,7 +590,7 @@ public static bool inside_function() public static HandleData get_resource_handle_data(Tensor graph_op) { - var handle_data = c_api.TFC_GetHandleShapeAndType(graph_op.graph.c_graph, graph_op._as_tf_output()); + var handle_data = c_api.TF_GetHandleShapeAndType(graph_op.graph.c_graph, graph_op._as_tf_output()); try{ var handle_str = c_api.ByteStringPiece(handle_data.DangerousGetHandle() == IntPtr.Zero ? null : new Buffer(handle_data)); return HandleData.Parser.ParseFrom(handle_str); diff --git a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj index a0ee22284..eb8ebf93c 100644 --- a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj +++ b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj @@ -7,7 +7,7 @@ enable Tensorflow.Keras AnyCPU;x64 - 0.11.4 + 0.15.0 Haiping Chen Keras for .NET Apache 2.0, Haiping Chen since 2018 @@ -30,6 +30,7 @@ * Fixed memory leak for YOLOv3 model. 
* Support RNN and LSTM models * Support Transformer model + * Support BERT model Keras for .NET @@ -42,8 +43,8 @@ Keras is an API designed for human beings, not machines. Keras follows best prac Git False Open.snk - 0.11.4.0 - 0.11.4.0 + 0.15.0.0 + 0.15.0.0 LICENSE Debug;Release;GPU @@ -143,7 +144,7 @@ Keras is an API designed for human beings, not machines. Keras follows best prac - + diff --git a/src/TensorflowNET.Hub/Tensorflow.Hub.csproj b/src/TensorflowNET.Hub/Tensorflow.Hub.csproj index 3c09f808e..efa37598d 100644 --- a/src/TensorflowNET.Hub/Tensorflow.Hub.csproj +++ b/src/TensorflowNET.Hub/Tensorflow.Hub.csproj @@ -26,7 +26,7 @@ - + diff --git a/tools/TensorFlowNET.Console/Tensorflow.Console.csproj b/tools/TensorFlowNET.Console/Tensorflow.Console.csproj index ecc2d30b5..bb60b6b63 100644 --- a/tools/TensorFlowNET.Console/Tensorflow.Console.csproj +++ b/tools/TensorFlowNET.Console/Tensorflow.Console.csproj @@ -19,13 +19,10 @@ AnyCPU - - - - + diff --git a/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj index 03195e6ac..2afc68a3c 100644 --- a/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj +++ b/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj @@ -9,7 +9,6 @@ - diff --git a/tools/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj b/tools/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj index 1ca387dbb..0d1018cab 100644 --- a/tools/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj +++ b/tools/Tensorflow.UnitTest.RedistHolder/Tensorflow.UnitTest.RedistHolder.csproj @@ -5,7 +5,7 @@ - + From 53bd70bed3828a81e83bc1a2edbe1b3cbfab197a Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Tue, 7 Nov 2023 22:54:08 +0800 Subject: [PATCH 152/182] fix: fix the validation_pack when multiple input --- src/TensorFlowNET.Core/Util/Data.cs | 26 ++++++++++++++----- .../Engine/DataAdapters/DataAdapter.cs | 14 +++++++--- .../Engine/Model.Evaluate.cs | 8 +++++- src/TensorFlowNET.Keras/Engine/Model.Fit.cs | 23 +++++++++++++--- 4 files changed, 56 insertions(+), 15 deletions(-) diff --git a/src/TensorFlowNET.Core/Util/Data.cs b/src/TensorFlowNET.Core/Util/Data.cs index a14c69b18..4e5a65434 100644 --- a/src/TensorFlowNET.Core/Util/Data.cs +++ b/src/TensorFlowNET.Core/Util/Data.cs @@ -1,4 +1,5 @@ -using Tensorflow.NumPy; +using OneOf; +using Tensorflow.NumPy; namespace Tensorflow.Util { @@ -8,10 +9,10 @@ namespace Tensorflow.Util /// public class ValidationDataPack { - public NDArray val_x; + public OneOf val_x; public NDArray val_y; public NDArray val_sample_weight = null; - + public bool val_x_is_array = false; public ValidationDataPack((NDArray, NDArray) validation_data) { this.val_x = validation_data.Item1; @@ -27,15 +28,17 @@ public ValidationDataPack((NDArray, NDArray, NDArray) validation_data) public ValidationDataPack((IEnumerable, NDArray) validation_data) { - this.val_x = validation_data.Item1.ToArray()[0]; + this.val_x = validation_data.Item1.ToArray(); this.val_y = validation_data.Item2; + val_x_is_array = true; } public ValidationDataPack((IEnumerable, NDArray, NDArray) validation_data) { - this.val_x = validation_data.Item1.ToArray()[0]; + this.val_x = validation_data.Item1.ToArray(); this.val_y = validation_data.Item2; this.val_sample_weight = validation_data.Item3; + val_x_is_array = true; } public static implicit operator ValidationDataPack((NDArray, NDArray) validation_data) @@ -52,15 +55,24 @@ public static implicit operator 
ValidationDataPack((IEnumerable, NDArra public void Deconstruct(out NDArray val_x, out NDArray val_y) { - val_x = this.val_x; + val_x = this.val_x.AsT0; val_y = this.val_y; } public void Deconstruct(out NDArray val_x, out NDArray val_y, out NDArray val_sample_weight) { - val_x = this.val_x; + val_x = this.val_x.AsT0; + val_y = this.val_y; + val_sample_weight = this.val_sample_weight; + } + + // add a unuse parameter to make it different from Deconstruct(out NDArray val_x, out NDArray val_y, out NDArray val_sample_weight) + public void Deconstruct(out NDArray[] val_x_array, out NDArray val_y, out NDArray val_sample_weight, out NDArray unuse) + { + val_x_array = this.val_x.AsT1; val_y = this.val_y; val_sample_weight = this.val_sample_weight; + unuse = null; } } } diff --git a/src/TensorFlowNET.Keras/Engine/DataAdapters/DataAdapter.cs b/src/TensorFlowNET.Keras/Engine/DataAdapters/DataAdapter.cs index b2750496a..590f30a78 100644 --- a/src/TensorFlowNET.Keras/Engine/DataAdapters/DataAdapter.cs +++ b/src/TensorFlowNET.Keras/Engine/DataAdapters/DataAdapter.cs @@ -92,9 +92,17 @@ public static ((IEnumerable, NDArray, NDArray), ValidationDataPack) tra var train_y = y[new Slice(0, train_count)]; var val_x = x.Select(x => x[new Slice(train_count)] as NDArray); var val_y = y[new Slice(train_count)]; - NDArray tmp_sample_weight = sample_weight; - sample_weight = sample_weight[new Slice(0, train_count)]; - ValidationDataPack validation_data = (val_x, val_y, tmp_sample_weight[new Slice(train_count)]); + + ValidationDataPack validation_data; + if (sample_weight != null) + { + validation_data = (val_x, val_y, sample_weight[new Slice(train_count)]); + sample_weight = sample_weight[new Slice(0, train_count)]; + } + else + { + validation_data = (val_x, val_y); + } return ((train_x, train_y, sample_weight), validation_data); } } diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index 474d5e5a5..b3264429e 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -70,13 +70,19 @@ public Dictionary evaluate(NDArray x, NDArray y, return evaluate(data_handler, callbacks, is_val, test_function); } - public Dictionary evaluate(IEnumerable x, Tensor y, int verbose = 1, bool is_val = false) + public Dictionary evaluate( + IEnumerable x, + Tensor y, + int verbose = 1, + NDArray sample_weight = null, + bool is_val = false) { var data_handler = new DataHandler(new DataHandlerArgs { X = new Tensors(x.ToArray()), Y = y, Model = this, + SampleWeight = sample_weight, StepsPerExecution = _steps_per_execution }); diff --git a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs index d61211c71..13a1b63bc 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs @@ -7,6 +7,7 @@ using System.Diagnostics; using Tensorflow.Keras.Callbacks; using Tensorflow.Util; +using OneOf; namespace Tensorflow.Keras.Engine { @@ -287,10 +288,24 @@ History FitInternal(DataHandler data_handler, int epochs, int verbose, List val_logs; + if (!validation_data.val_x_is_array) + { + (val_x, val_y, val_sample_weight) = validation_data; + // Because evaluate calls call_test_batch_end, this interferes with our output on the screen + // so we need to pass a is_val parameter to stop on_test_batch_end + val_logs = evaluate(val_x, val_y, sample_weight: val_sample_weight, is_val: true); + + } + else + { + (val_x_array, val_y, val_sample_weight, _) = 
validation_data; + val_logs = evaluate(val_x_array, val_y, sample_weight: val_sample_weight, is_val: true); + } foreach (var log in val_logs) { logs["val_" + log.Key] = log.Value; From d453fb6611f4acb3ab405579ae804279d6e07cbe Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Tue, 7 Nov 2023 23:34:37 +0800 Subject: [PATCH 153/182] refactor: declare some field of ValidationPack as internal --- src/TensorFlowNET.Core/Util/Data.cs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/TensorFlowNET.Core/Util/Data.cs b/src/TensorFlowNET.Core/Util/Data.cs index 4e5a65434..388efc50f 100644 --- a/src/TensorFlowNET.Core/Util/Data.cs +++ b/src/TensorFlowNET.Core/Util/Data.cs @@ -9,9 +9,9 @@ namespace Tensorflow.Util /// public class ValidationDataPack { - public OneOf val_x; - public NDArray val_y; - public NDArray val_sample_weight = null; + internal OneOf val_x; + internal NDArray val_y; + internal NDArray val_sample_weight = null; public bool val_x_is_array = false; public ValidationDataPack((NDArray, NDArray) validation_data) { @@ -33,7 +33,7 @@ public ValidationDataPack((IEnumerable, NDArray) validation_data) val_x_is_array = true; } - public ValidationDataPack((IEnumerable, NDArray, NDArray) validation_data) + internal ValidationDataPack((IEnumerable, NDArray, NDArray) validation_data) { this.val_x = validation_data.Item1.ToArray(); this.val_y = validation_data.Item2; From 47e9019a187744bf31e315525ffe352dad36a00c Mon Sep 17 00:00:00 2001 From: Wanglongzhi2001 <583087864@qq.com> Date: Tue, 7 Nov 2023 23:36:15 +0800 Subject: [PATCH 154/182] refactor: fix a typo --- src/TensorFlowNET.Core/Util/Data.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Core/Util/Data.cs b/src/TensorFlowNET.Core/Util/Data.cs index 388efc50f..fe3466ed0 100644 --- a/src/TensorFlowNET.Core/Util/Data.cs +++ b/src/TensorFlowNET.Core/Util/Data.cs @@ -33,7 +33,7 @@ public ValidationDataPack((IEnumerable, NDArray) validation_data) val_x_is_array = true; } - internal ValidationDataPack((IEnumerable, NDArray, NDArray) validation_data) + public ValidationDataPack((IEnumerable, NDArray, NDArray) validation_data) { this.val_x = validation_data.Item1.ToArray(); this.val_y = validation_data.Item2; From 2a377e2f91b40083f5de86f01b57b32bad5a5932 Mon Sep 17 00:00:00 2001 From: Alexander Novikov Date: Tue, 7 Nov 2023 19:23:34 +0000 Subject: [PATCH 155/182] tests are passing --- .../Variables/variables.py.cs | 8 ---- test/TensorFlowNET.UnitTest/PythonTest.cs | 40 ++++++++++++------- .../Training/GradientDescentOptimizerTests.cs | 33 +++++++++------ 3 files changed, 46 insertions(+), 35 deletions(-) diff --git a/src/TensorFlowNET.Core/Variables/variables.py.cs b/src/TensorFlowNET.Core/Variables/variables.py.cs index f3ae248e6..91f57e292 100644 --- a/src/TensorFlowNET.Core/Variables/variables.py.cs +++ b/src/TensorFlowNET.Core/Variables/variables.py.cs @@ -154,13 +154,5 @@ public static Operation _safe_initial_value_from_op(string name, Operation op, D return op; } - - public static Tensor global_variables_initializer() - { - // if context.executing_eagerly(): - // return control_flow_ops.no_op(name = "global_variables_initializer") - var group = variables_initializer(global_variables().ToArray()); - return group; - } } } diff --git a/test/TensorFlowNET.UnitTest/PythonTest.cs b/test/TensorFlowNET.UnitTest/PythonTest.cs index 12fd72360..090ef097c 100644 --- a/test/TensorFlowNET.UnitTest/PythonTest.cs +++ b/test/TensorFlowNET.UnitTest/PythonTest.cs @@ -6,6 +6,7 
@@ using System.Linq; using Tensorflow; using static Tensorflow.Binding; +using System.Collections.Generic; namespace TensorFlowNET.UnitTest { @@ -144,11 +145,12 @@ public void assertAllClose(double value, NDArray array2, double eps = 1e-5) Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); } - private class CollectionComparer : System.Collections.IComparer + private class CollectionComparer : IComparer { private readonly double _epsilon; - public CollectionComparer(double eps = 1e-06) { + public CollectionComparer(double eps = 1e-06) + { _epsilon = eps; } public int Compare(object x, object y) @@ -166,13 +168,15 @@ public int Compare(object x, object y) } public void assertAllCloseAccordingToType( - T[] expected, - T[] given, + ICollection expected, + ICollection given, double eps = 1e-6, float float_eps = 1e-6f) { // TODO: check if any of arguments is not double and change toletance - CollectionAssert.AreEqual(expected, given, new CollectionComparer(eps)); + // remove givenAsDouble and cast expected instead + var givenAsDouble = given.Select(x => Convert.ToDouble(x)).ToArray(); + CollectionAssert.AreEqual(expected, givenAsDouble, new CollectionComparer(eps)); } public void assertProtoEquals(object toProto, object o) @@ -241,17 +245,25 @@ public T evaluate(Tensor tensor) // return self._eval_helper(tensors) // else: { - var sess = tf.Session(); + var sess = tf.get_default_session(); var ndarray = tensor.eval(sess); - if (typeof(T) == typeof(double)) + if (typeof(T) == typeof(double) + || typeof(T) == typeof(float) + || typeof(T) == typeof(int)) + { + result = Convert.ChangeType(ndarray, typeof(T)); + } + else if (typeof(T) == typeof(double[])) + { + result = ndarray.ToMultiDimArray(); + } + else if (typeof(T) == typeof(float[])) { - double x = ndarray; - result = x; + result = ndarray.ToMultiDimArray(); } - else if (typeof(T) == typeof(int)) + else if (typeof(T) == typeof(int[])) { - int x = ndarray; - result = x; + result = ndarray.ToMultiDimArray(); } else { @@ -457,12 +469,12 @@ private Session _get_cached_session( else { - if (crash_if_inconsistent_args && !self._cached_graph.Equals(graph)) + if (crash_if_inconsistent_args && self._cached_graph != null && !self._cached_graph.Equals(graph)) throw new ValueError(@"The graph used to get the cached session is different than the one that was used to create the session. 
Maybe create a new session with self.session()"); - if (crash_if_inconsistent_args && !self._cached_config.Equals(config)) + if (crash_if_inconsistent_args && self._cached_config != null && !self._cached_config.Equals(config)) { throw new ValueError(@"The config used to get the cached session is different than the one that was used to create the diff --git a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs index 977544ae9..3059068f4 100644 --- a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs +++ b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs @@ -1,8 +1,6 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; using System; using System.Linq; -using System.Runtime.Intrinsics.X86; -using System.Security.AccessControl; using Tensorflow.NumPy; using TensorFlowNET.UnitTest; using static Tensorflow.Binding; @@ -12,18 +10,23 @@ namespace Tensorflow.Keras.UnitTest.Optimizers [TestClass] public class GradientDescentOptimizerTest : PythonTest { - private void TestBasicGeneric() where T : struct + private static TF_DataType GetTypeForNumericType() where T : struct { - var dtype = Type.GetTypeCode(typeof(T)) switch + return Type.GetTypeCode(typeof(T)) switch { TypeCode.Single => np.float32, TypeCode.Double => np.float64, _ => throw new NotImplementedException(), }; + } + + private void TestBasicGeneric() where T : struct + { + var dtype = GetTypeForNumericType(); // train.GradientDescentOptimizer is V1 only API. tf.Graph().as_default(); - using (self.cached_session()) + using (var sess = self.cached_session()) { var var0 = tf.Variable(new[] { 1.0, 2.0 }, dtype: dtype); var var1 = tf.Variable(new[] { 3.0, 4.0 }, dtype: dtype); @@ -36,21 +39,25 @@ private void TestBasicGeneric() where T : struct }; var sgd_op = optimizer.apply_gradients(grads_and_vars); - var global_variables = variables.global_variables_initializer(); - self.evaluate(global_variables); + var global_variables = tf.global_variables_initializer(); + sess.run(global_variables); + // Fetch params to validate initial values + var initialVar0 = sess.run(var0); + var valu = var0.eval(sess); + var initialVar1 = sess.run(var1); // TODO: use self.evaluate instead of self.evaluate - self.assertAllCloseAccordingToType(new double[] { 1.0, 2.0 }, self.evaluate(var0)); - self.assertAllCloseAccordingToType(new double[] { 3.0, 4.0 }, self.evaluate(var1)); + self.assertAllCloseAccordingToType(new[] { 1.0, 2.0 }, self.evaluate(var0)); + self.assertAllCloseAccordingToType(new[] { 3.0, 4.0 }, self.evaluate(var1)); // Run 1 step of sgd sgd_op.run(); // Validate updated params self.assertAllCloseAccordingToType( - new double[] { 1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1 }, - self.evaluate(var0)); + new[] { 1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1 }, + self.evaluate(var0)); self.assertAllCloseAccordingToType( - new double[] { 3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01 }, - self.evaluate(var1)); + new[] { 3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01 }, + self.evaluate(var1)); // TODO: self.assertEqual(0, len(optimizer.variables())); } } From f7b8dba00b2465114926072d4a82924dc35596d7 Mon Sep 17 00:00:00 2001 From: Alexander Date: Wed, 8 Nov 2023 15:16:02 +0000 Subject: [PATCH 156/182] small fixes --- .../Training/GradientDescentOptimizerTests.cs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs 
b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs index 3059068f4..1a650a864 100644 --- a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs +++ b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs @@ -1,4 +1,5 @@ -using Microsoft.VisualStudio.TestTools.UnitTesting; +using Microsoft.VisualStudio.TestPlatform.Utilities; +using Microsoft.VisualStudio.TestTools.UnitTesting; using System; using System.Linq; using Tensorflow.NumPy; @@ -20,7 +21,7 @@ private static TF_DataType GetTypeForNumericType() where T : struct }; } - private void TestBasicGeneric() where T : struct + private void TestBasic() where T : struct { var dtype = GetTypeForNumericType(); @@ -42,11 +43,9 @@ private void TestBasicGeneric() where T : struct var global_variables = tf.global_variables_initializer(); sess.run(global_variables); - // Fetch params to validate initial values var initialVar0 = sess.run(var0); - var valu = var0.eval(sess); var initialVar1 = sess.run(var1); - // TODO: use self.evaluate instead of self.evaluate + // Fetch params to validate initial values self.assertAllCloseAccordingToType(new[] { 1.0, 2.0 }, self.evaluate(var0)); self.assertAllCloseAccordingToType(new[] { 3.0, 4.0 }, self.evaluate(var1)); // Run 1 step of sgd @@ -66,10 +65,9 @@ private void TestBasicGeneric() where T : struct public void TestBasic() { //TODO: add np.half - TestBasicGeneric(); - TestBasicGeneric(); + TestBasic(); + TestBasic(); } - } } From c906f46aadaf2e2f0d1769f026270ba912ef95be Mon Sep 17 00:00:00 2001 From: Alexander Date: Wed, 8 Nov 2023 15:24:13 +0000 Subject: [PATCH 157/182] learning rate test --- .../Training/GradientDescentOptimizerTests.cs | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs index 1a650a864..92fe97706 100644 --- a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs +++ b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs @@ -1,6 +1,7 @@ using Microsoft.VisualStudio.TestPlatform.Utilities; using Microsoft.VisualStudio.TestTools.UnitTesting; using System; +using System.Diagnostics; using System.Linq; using Tensorflow.NumPy; using TensorFlowNET.UnitTest; @@ -69,5 +70,53 @@ public void TestBasic() TestBasic(); } + private void TestTensorLearningRate() where T : struct + { + var dtype = GetTypeForNumericType(); + + // train.GradientDescentOptimizer is V1 only API. 
+ tf.Graph().as_default(); + using (var sess = self.cached_session()) + { + var var0 = tf.Variable(new[] { 1.0, 2.0 }, dtype: dtype); + var var1 = tf.Variable(new[] { 3.0, 4.0 }, dtype: dtype); + var grads0 = tf.constant(new[] { 0.1, 0.1 }, dtype: dtype); + var grads1 = tf.constant(new[] { 0.01, 0.01 }, dtype: dtype); + var lrate = constant_op.constant(3.0); + var grads_and_vars = new[] { + Tuple.Create(grads0, var0 as IVariableV1), + Tuple.Create(grads1, var1 as IVariableV1) + }; + var sgd_op = tf.train.GradientDescentOptimizer(lrate) + .apply_gradients(grads_and_vars); + + var global_variables = tf.global_variables_initializer(); + sess.run(global_variables); + + var initialVar0 = sess.run(var0); + var initialVar1 = sess.run(var1); + // Fetch params to validate initial values + self.assertAllCloseAccordingToType(new[] { 1.0, 2.0 }, self.evaluate(var0)); + self.assertAllCloseAccordingToType(new[] { 3.0, 4.0 }, self.evaluate(var1)); + // Run 1 step of sgd + sgd_op.run(); + // Validate updated params + self.assertAllCloseAccordingToType( + new[] { 1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1 }, + self.evaluate(var0)); + self.assertAllCloseAccordingToType( + new[] { 3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01 }, + self.evaluate(var1)); + // TODO: self.assertEqual(0, len(optimizer.variables())); + } + } + + [TestMethod] + public void TestTensorLearningRate() + { + //TODO: add np.half + TestTensorLearningRate(); + TestTensorLearningRate(); + } } } From 149caaec11b649e6f9e85320a1f18689c32cae6c Mon Sep 17 00:00:00 2001 From: Alexander Date: Fri, 10 Nov 2023 02:44:01 +0000 Subject: [PATCH 158/182] test ci --- .../Training/GradientDescentOptimizerTests.cs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs index 92fe97706..98738528d 100644 --- a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs +++ b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs @@ -27,8 +27,8 @@ private void TestBasic() where T : struct var dtype = GetTypeForNumericType(); // train.GradientDescentOptimizer is V1 only API. 
- tf.Graph().as_default(); - using (var sess = self.cached_session()) + //tf.Graph().as_default(); + /*using (var sess = self.cached_session()) { var var0 = tf.Variable(new[] { 1.0, 2.0 }, dtype: dtype); var var1 = tf.Variable(new[] { 3.0, 4.0 }, dtype: dtype); @@ -59,7 +59,7 @@ private void TestBasic() where T : struct new[] { 3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01 }, self.evaluate(var1)); // TODO: self.assertEqual(0, len(optimizer.variables())); - } + }*/ } [TestMethod] @@ -67,7 +67,7 @@ public void TestBasic() { //TODO: add np.half TestBasic(); - TestBasic(); + // TestBasic(); } private void TestTensorLearningRate() where T : struct @@ -115,8 +115,8 @@ private void TestTensorLearningRate() where T : struct public void TestTensorLearningRate() { //TODO: add np.half - TestTensorLearningRate(); - TestTensorLearningRate(); + // TestTensorLearningRate(); + // TestTensorLearningRate(); } } } From 2cb5fd66f842832a2254155f296a54764473f5cd Mon Sep 17 00:00:00 2001 From: Alexander Date: Fri, 10 Nov 2023 13:53:40 +0000 Subject: [PATCH 159/182] new graph --- .../Training/BasicLinearModel.cs | 2 ++ .../Training/GradientDescentOptimizerTests.cs | 17 +++++++---------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs b/test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs index 1283ecaf2..a37f28920 100644 --- a/test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs +++ b/test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs @@ -15,6 +15,8 @@ public class BasicLinearModel [TestMethod] public void LinearRegression() { + tf.Graph().as_default(); + // Initialize the weights to `5.0` and the bias to `0.0` // In practice, these should be initialized to random values (for example, with `tf.random.normal`) var W = tf.Variable(5.0f); diff --git a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs index 98738528d..1632f1e73 100644 --- a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs +++ b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs @@ -1,8 +1,5 @@ -using Microsoft.VisualStudio.TestPlatform.Utilities; -using Microsoft.VisualStudio.TestTools.UnitTesting; +using Microsoft.VisualStudio.TestTools.UnitTesting; using System; -using System.Diagnostics; -using System.Linq; using Tensorflow.NumPy; using TensorFlowNET.UnitTest; using static Tensorflow.Binding; @@ -27,8 +24,8 @@ private void TestBasic() where T : struct var dtype = GetTypeForNumericType(); // train.GradientDescentOptimizer is V1 only API. 
- //tf.Graph().as_default(); - /*using (var sess = self.cached_session()) + tf.Graph().as_default(); + using (var sess = self.cached_session()) { var var0 = tf.Variable(new[] { 1.0, 2.0 }, dtype: dtype); var var1 = tf.Variable(new[] { 3.0, 4.0 }, dtype: dtype); @@ -59,7 +56,7 @@ private void TestBasic() where T : struct new[] { 3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01 }, self.evaluate(var1)); // TODO: self.assertEqual(0, len(optimizer.variables())); - }*/ + } } [TestMethod] @@ -67,7 +64,7 @@ public void TestBasic() { //TODO: add np.half TestBasic(); - // TestBasic(); + TestBasic(); } private void TestTensorLearningRate() where T : struct @@ -115,8 +112,8 @@ private void TestTensorLearningRate() where T : struct public void TestTensorLearningRate() { //TODO: add np.half - // TestTensorLearningRate(); - // TestTensorLearningRate(); + TestTensorLearningRate(); + TestTensorLearningRate(); } } } From 09d466d697e58d97598bbee248ffd7ceb8a7be92 Mon Sep 17 00:00:00 2001 From: Alexander Date: Fri, 10 Nov 2023 14:00:51 +0000 Subject: [PATCH 160/182] ci test --- test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs b/test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs index a37f28920..d0da1d5b9 100644 --- a/test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs +++ b/test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs @@ -15,7 +15,9 @@ public class BasicLinearModel [TestMethod] public void LinearRegression() { - tf.Graph().as_default(); + var graph = tf.Graph().as_default(); + var sess = new Session(graph); + sess.as_default(); // Initialize the weights to `5.0` and the bias to `0.0` // In practice, these should be initialized to random values (for example, with `tf.random.normal`) From c5b4928bd6eaa9fcff9d0e71932cd7c1587d1eb6 Mon Sep 17 00:00:00 2001 From: Alexander Date: Fri, 10 Nov 2023 14:28:41 +0000 Subject: [PATCH 161/182] correct namespace passing --- test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs | 4 ---- .../Training/GradientDescentOptimizerTests.cs | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs b/test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs index d0da1d5b9..1283ecaf2 100644 --- a/test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs +++ b/test/TensorFlowNET.UnitTest/Training/BasicLinearModel.cs @@ -15,10 +15,6 @@ public class BasicLinearModel [TestMethod] public void LinearRegression() { - var graph = tf.Graph().as_default(); - var sess = new Session(graph); - sess.as_default(); - // Initialize the weights to `5.0` and the bias to `0.0` // In practice, these should be initialized to random values (for example, with `tf.random.normal`) var W = tf.Variable(5.0f); diff --git a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs index 1632f1e73..d766890b2 100644 --- a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs +++ b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs @@ -1,10 +1,10 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; using System; +using Tensorflow; using Tensorflow.NumPy; -using TensorFlowNET.UnitTest; using static Tensorflow.Binding; -namespace Tensorflow.Keras.UnitTest.Optimizers +namespace TensorFlowNET.UnitTest.Training { [TestClass] public class GradientDescentOptimizerTest : PythonTest From 
fc8f493187bd382bc994c4f79c17b369611cca36 Mon Sep 17 00:00:00 2001 From: Alexander Date: Fri, 10 Nov 2023 20:47:49 +0000 Subject: [PATCH 162/182] common assembly for python test --- TensorFlow.NET.sln | 23 +- .../PythonTest.cs | 448 ------------------ .../TensorFlowNET.Graph.UnitTest.csproj | 1 + .../Tensorflow.Binding.UnitTest.csproj | 1 + .../PythonTest.cs | 3 - .../Tensorflow.UnitTest.csproj | 24 + 6 files changed, 48 insertions(+), 452 deletions(-) delete mode 100644 test/TensorFlowNET.Graph.UnitTest/PythonTest.cs rename test/{TensorFlowNET.UnitTest => Tensorflow.UnitTest}/PythonTest.cs (99%) create mode 100644 test/Tensorflow.UnitTest/Tensorflow.UnitTest.csproj diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln index 214b039d4..e0c273568 100644 --- a/TensorFlow.NET.sln +++ b/TensorFlow.NET.sln @@ -39,7 +39,9 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "too EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Console", "tools\TensorFlowNET.Console\Tensorflow.Console.csproj", "{1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlow.Kernel.UnitTest", "test\TensorFlow.Kernel.UnitTest\TensorFlow.Kernel.UnitTest.csproj", "{654A027D-1364-4729-880B-144DFE1FF5BB}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlow.Kernel.UnitTest", "test\TensorFlow.Kernel.UnitTest\TensorFlow.Kernel.UnitTest.csproj", "{654A027D-1364-4729-880B-144DFE1FF5BB}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Tensorflow.UnitTest", "test\Tensorflow.UnitTest\Tensorflow.UnitTest.csproj", "{A73DF5A6-866E-4AED-9017-AA2EE86368C4}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -342,6 +344,24 @@ Global {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x64.Build.0 = Release|Any CPU {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x86.ActiveCfg = Release|Any CPU {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x86.Build.0 = Release|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x64.ActiveCfg = Debug|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x64.Build.0 = Debug|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x86.ActiveCfg = Debug|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x86.Build.0 = Debug|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|Any CPU.Build.0 = Debug|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x64.ActiveCfg = Debug|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x64.Build.0 = Debug|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x86.ActiveCfg = Debug|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x86.Build.0 = Debug|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|Any CPU.Build.0 = Release|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x64.ActiveCfg = Release|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x64.Build.0 = Release|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x86.ActiveCfg = Release|Any CPU + {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -363,6 +383,7 @@ Global 
{C23563DB-FE21-48E7-A411-87A109E4A899} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} {654A027D-1364-4729-880B-144DFE1FF5BB} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD} + {A73DF5A6-866E-4AED-9017-AA2EE86368C4} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A} diff --git a/test/TensorFlowNET.Graph.UnitTest/PythonTest.cs b/test/TensorFlowNET.Graph.UnitTest/PythonTest.cs deleted file mode 100644 index ccf59f5ae..000000000 --- a/test/TensorFlowNET.Graph.UnitTest/PythonTest.cs +++ /dev/null @@ -1,448 +0,0 @@ -using Microsoft.VisualStudio.TestTools.UnitTesting; -using Newtonsoft.Json.Linq; -using Tensorflow.NumPy; -using System; -using System.Collections; -using System.Linq; -using Tensorflow; -using static Tensorflow.Binding; -using OneOf.Types; -using System.Collections.Generic; - -namespace TensorFlowNET.UnitTest -{ - /// - /// Use as base class for test classes to get additional assertions - /// - public class PythonTest - { - #region python compatibility layer - protected PythonTest self { get => this; } - protected int None => -1; - #endregion - - #region pytest assertions - - public void assertItemsEqual(ICollection given, ICollection expected) - { - if (given is Hashtable && expected is Hashtable) - { - Assert.AreEqual(JObject.FromObject(expected).ToString(), JObject.FromObject(given).ToString()); - return; - } - Assert.IsNotNull(expected); - Assert.IsNotNull(given); - var e = expected.OfType().ToArray(); - var g = given.OfType().ToArray(); - Assert.AreEqual(e.Length, g.Length, $"The collections differ in length expected {e.Length} but got {g.Length}"); - for (int i = 0; i < e.Length; i++) - { - /*if (g[i] is NDArray && e[i] is NDArray) - assertItemsEqual((g[i] as NDArray).GetData(), (e[i] as NDArray).GetData()); - else*/ - if (e[i] is ICollection && g[i] is ICollection) - assertEqual(g[i], e[i]); - else - Assert.AreEqual(e[i], g[i], $"Items differ at index {i}, expected {e[i]} but got {g[i]}"); - } - } - - public void assertAllEqual(ICollection given, ICollection expected) - { - assertItemsEqual(given, expected); - } - - public void assertFloat32Equal(float expected, float actual, string msg) - { - float eps = 1e-6f; - Assert.IsTrue(Math.Abs(expected - actual) < eps * Math.Max(1.0f, Math.Abs(expected)), $"{msg}: expected {expected} vs actual {actual}"); - } - - public void assertFloat64Equal(double expected, double actual, string msg) - { - double eps = 1e-16f; - Assert.IsTrue(Math.Abs(expected - actual) < eps * Math.Max(1.0f, Math.Abs(expected)), $"{msg}: expected {expected} vs actual {actual}"); - } - - public void assertEqual(object given, object expected) - { - /*if (given is NDArray && expected is NDArray) - { - assertItemsEqual((given as NDArray).GetData(), (expected as NDArray).GetData()); - return; - }*/ - if (given is Hashtable && expected is Hashtable) - { - Assert.AreEqual(JObject.FromObject(expected).ToString(), JObject.FromObject(given).ToString()); - return; - } - if (given is ICollection && expected is ICollection) - { - assertItemsEqual(given as ICollection, expected as ICollection); - return; - } - if (given is float && expected is float) - { - assertFloat32Equal((float)expected, (float)given, ""); - return; - } - if (given is double && expected is double) - { - assertFloat64Equal((double)expected, (double)given, ""); - return; - } - Assert.AreEqual(expected, given); - } - - public void 
assertEquals(object given, object expected) - { - assertEqual(given, expected); - } - - public void assert(object given) - { - if (given is bool) - Assert.IsTrue((bool)given); - Assert.IsNotNull(given); - } - - public void assertIsNotNone(object given) - { - Assert.IsNotNull(given); - } - - public void assertFalse(bool cond) - { - Assert.IsFalse(cond); - } - - public void assertTrue(bool cond) - { - Assert.IsTrue(cond); - } - - public void assertAllClose(NDArray array1, NDArray array2, double eps = 1e-5) - { - Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); - } - - public void assertAllClose(double value, NDArray array2, double eps = 1e-5) - { - var array1 = np.ones_like(array2) * value; - // Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); - } - - public void assertProtoEquals(object toProto, object o) - { - throw new NotImplementedException(); - } - - #endregion - - #region tensor evaluation and test session - - private Session _cached_session = null; - private Graph _cached_graph = null; - private object _cached_config = null; - private bool _cached_force_gpu = false; - - private void _ClearCachedSession() - { - if (self._cached_session != null) - { - self._cached_session.Dispose(); - self._cached_session = null; - } - } - - - //protected object _eval_helper(Tensor[] tensors) - //{ - // if (tensors == null) - // return null; - // return nest.map_structure(self._eval_tensor, tensors); - //} - - protected object _eval_tensor(object tensor) - { - if (tensor == null) - return None; - //else if (callable(tensor)) - // return self._eval_helper(tensor()) - else - { - try - { - //TODO: - // if sparse_tensor.is_sparse(tensor): - // return sparse_tensor.SparseTensorValue(tensor.indices, tensor.values, - // tensor.dense_shape) - //return (tensor as Tensor).numpy(); - } - catch (Exception) - { - throw new ValueError("Unsupported type: " + tensor.GetType()); - } - return null; - } - } - - /// - /// This function is used in many original tensorflow unit tests to evaluate tensors - /// in a test session with special settings (for instance constant folding off) - /// - /// - public T evaluate(Tensor tensor) - { - object result = null; - // if context.executing_eagerly(): - // return self._eval_helper(tensors) - // else: - { - var sess = tf.Session(); - var ndarray = tensor.eval(sess); - if (typeof(T) == typeof(double)) - { - double x = ndarray; - result = x; - } - else if (typeof(T) == typeof(int)) - { - int x = ndarray; - result = x; - } - else - { - result = ndarray; - } - - return (T)result; - } - } - - ///Returns a TensorFlow Session for use in executing tests. - public Session cached_session( - Graph graph = null, object config = null, bool use_gpu = false, bool force_gpu = false) - { - // This method behaves differently than self.session(): for performance reasons - // `cached_session` will by default reuse the same session within the same - // test.The session returned by this function will only be closed at the end - // of the test(in the TearDown function). - - // Use the `use_gpu` and `force_gpu` options to control where ops are run.If - // `force_gpu` is True, all ops are pinned to `/ device:GPU:0`. Otherwise, if - // `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as - // possible.If both `force_gpu and `use_gpu` are False, all ops are pinned to - // the CPU. 
- - // Example: - // python - // class MyOperatorTest(test_util.TensorFlowTestCase) : - // def testMyOperator(self): - // with self.cached_session() as sess: - // valid_input = [1.0, 2.0, 3.0, 4.0, 5.0] - // result = MyOperator(valid_input).eval() - // self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0] - // invalid_input = [-1.0, 2.0, 7.0] - // with self.assertRaisesOpError("negative input not supported"): - // MyOperator(invalid_input).eval() - - - // Args: - // graph: Optional graph to use during the returned session. - // config: An optional config_pb2.ConfigProto to use to configure the - // session. - // use_gpu: If True, attempt to run as many ops as possible on GPU. - // force_gpu: If True, pin all ops to `/device:GPU:0`. - - // Yields: - // A Session object that should be used as a context manager to surround - // the graph building and execution code in a test case. - - - // TODO: - // if context.executing_eagerly(): - // return self._eval_helper(tensors) - // else: - { - var sess = self._get_cached_session( - graph, config, force_gpu, crash_if_inconsistent_args: true); - using var cached = self._constrain_devices_and_set_default(sess, use_gpu, force_gpu); - return cached; - } - } - - //Returns a TensorFlow Session for use in executing tests. - public Session session(Graph graph = null, object config = null, bool use_gpu = false, bool force_gpu = false) - { - //Note that this will set this session and the graph as global defaults. - - //Use the `use_gpu` and `force_gpu` options to control where ops are run.If - //`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if - //`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as - //possible.If both `force_gpu and `use_gpu` are False, all ops are pinned to - //the CPU. - - //Example: - //```python - //class MyOperatorTest(test_util.TensorFlowTestCase): - // def testMyOperator(self): - // with self.session(use_gpu= True): - // valid_input = [1.0, 2.0, 3.0, 4.0, 5.0] - // result = MyOperator(valid_input).eval() - // self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0] - // invalid_input = [-1.0, 2.0, 7.0] - // with self.assertRaisesOpError("negative input not supported"): - // MyOperator(invalid_input).eval() - //``` - - //Args: - // graph: Optional graph to use during the returned session. - // config: An optional config_pb2.ConfigProto to use to configure the - // session. - // use_gpu: If True, attempt to run as many ops as possible on GPU. - // force_gpu: If True, pin all ops to `/device:GPU:0`. - - //Yields: - // A Session object that should be used as a context manager to surround - // the graph building and execution code in a test case. 
- - Session s = null; - //if (context.executing_eagerly()) - // yield None - //else - //{ - s = self._create_session(graph, config, force_gpu); - //} - return s.as_default(); - } - - private Session _constrain_devices_and_set_default(Session sess, bool use_gpu, bool force_gpu) - { - // Set the session and its graph to global default and constrain devices.""" - if (tf.executing_eagerly()) - return null; - else { - sess.graph.as_default(); - sess.as_default(); - { - if (force_gpu) - { - // TODO: - - // Use the name of an actual device if one is detected, or - // '/device:GPU:0' otherwise - /* var gpu_name = gpu_device_name(); - if (!gpu_name) - gpu_name = "/device:GPU:0" - using (sess.graph.device(gpu_name)) { - yield return sess; - }*/ - return sess; - } - else if (use_gpu) - return sess; - else - using (sess.graph.device("/device:CPU:0")) - return sess; - } - - } - } - - // See session() for details. - private Session _create_session(Graph graph, object cfg, bool forceGpu) - { - var prepare_config = new Func((config) => - { - // """Returns a config for sessions. - // Args: - // config: An optional config_pb2.ConfigProto to use to configure the - // session. - // Returns: - // A config_pb2.ConfigProto object. - - //TODO: config - - // # use_gpu=False. Currently many tests rely on the fact that any device - // # will be used even when a specific device is supposed to be used. - // allow_soft_placement = not force_gpu - // if config is None: - // config = config_pb2.ConfigProto() - // config.allow_soft_placement = allow_soft_placement - // config.gpu_options.per_process_gpu_memory_fraction = 0.3 - // elif not allow_soft_placement and config.allow_soft_placement: - // config_copy = config_pb2.ConfigProto() - // config_copy.CopyFrom(config) - // config = config_copy - // config.allow_soft_placement = False - // # Don't perform optimizations for tests so we don't inadvertently run - // # gpu ops on cpu - // config.graph_options.optimizer_options.opt_level = -1 - // # Disable Grappler constant folding since some tests & benchmarks - // # use constant input and become meaningless after constant folding. - // # DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE - // # GRAPPLER TEAM. - // config.graph_options.rewrite_options.constant_folding = ( - // rewriter_config_pb2.RewriterConfig.OFF) - // config.graph_options.rewrite_options.pin_to_host_optimization = ( - // rewriter_config_pb2.RewriterConfig.OFF) - return config; - }); - //TODO: use this instead of normal session - //return new ErrorLoggingSession(graph = graph, config = prepare_config(config)) - return new Session(graph);//, config = prepare_config(config)) - } - - private Session _get_cached_session( - Graph graph = null, - object config = null, - bool force_gpu = false, - bool crash_if_inconsistent_args = true) - { - // See cached_session() for documentation. - if (self._cached_session == null) - { - var sess = self._create_session(graph, config, force_gpu); - self._cached_session = sess; - self._cached_graph = graph; - self._cached_config = config; - self._cached_force_gpu = force_gpu; - return sess; - } else { - - if (crash_if_inconsistent_args && !self._cached_graph.Equals(graph)) - throw new ValueError(@"The graph used to get the cached session is - different than the one that was used to create the - session. 
Maybe create a new session with - self.session()"); - if (crash_if_inconsistent_args && !self._cached_config.Equals(config)) { - throw new ValueError(@"The config used to get the cached session is - different than the one that was used to create the - session. Maybe create a new session with - self.session()"); - } - if (crash_if_inconsistent_args && !self._cached_force_gpu.Equals(force_gpu)) { - throw new ValueError(@"The force_gpu value used to get the cached session is - different than the one that was used to create the - session. Maybe create a new session with - self.session()"); - } - return _cached_session; - } - } - - [TestCleanup] - public void Cleanup() - { - _ClearCachedSession(); - } - - #endregion - - public void AssetSequenceEqual(T[] a, T[] b) - { - Assert.IsTrue(Enumerable.SequenceEqual(a, b)); - } - } -} diff --git a/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj b/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj index 78a0938c5..74663c1cb 100644 --- a/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj +++ b/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj @@ -36,6 +36,7 @@ + diff --git a/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj b/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj index 7a6a7f92c..5264cb104 100644 --- a/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj +++ b/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj @@ -51,6 +51,7 @@ + diff --git a/test/TensorFlowNET.UnitTest/PythonTest.cs b/test/Tensorflow.UnitTest/PythonTest.cs similarity index 99% rename from test/TensorFlowNET.UnitTest/PythonTest.cs rename to test/Tensorflow.UnitTest/PythonTest.cs index 090ef097c..b2412ea9f 100644 --- a/test/TensorFlowNET.UnitTest/PythonTest.cs +++ b/test/Tensorflow.UnitTest/PythonTest.cs @@ -1,12 +1,9 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; using Newtonsoft.Json.Linq; using Tensorflow.NumPy; -using System; using System.Collections; -using System.Linq; using Tensorflow; using static Tensorflow.Binding; -using System.Collections.Generic; namespace TensorFlowNET.UnitTest { diff --git a/test/Tensorflow.UnitTest/Tensorflow.UnitTest.csproj b/test/Tensorflow.UnitTest/Tensorflow.UnitTest.csproj new file mode 100644 index 000000000..66a7d63bd --- /dev/null +++ b/test/Tensorflow.UnitTest/Tensorflow.UnitTest.csproj @@ -0,0 +1,24 @@ + + + + net6.0 + enable + enable + + false + true + + + + + + + + + + + + + + + From 165e9169e49841bb2d326ff903949244565a1a00 Mon Sep 17 00:00:00 2001 From: Alexander Date: Fri, 10 Nov 2023 21:01:12 +0000 Subject: [PATCH 163/182] assert all close --- .../GradientTest/GradientTest.cs | 22 +------------------ test/Tensorflow.UnitTest/PythonTest.cs | 18 +++++++-------- 2 files changed, 10 insertions(+), 30 deletions(-) diff --git a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs index e2d6db912..cea6de172 100644 --- a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs @@ -625,25 +625,6 @@ public void testPartialDerivatives() } } - // TODO: remove when np.testing.assert_allclose(a, b) is implemented - private class CollectionComparer : System.Collections.IComparer - { - private readonly double _epsilon = 1e-07; - - public int Compare(object x, object y) - { - var a = (double)x; - var b = (double)y; - - double delta = Math.Abs(a - b); - if (delta < 
_epsilon) - { - return 0; - } - return a.CompareTo(b); - } - } - private struct Case { public Tensor[] grad1; @@ -748,8 +729,7 @@ Tensor[] gradients(Tensor[] ys, Tensor[] xs, Tensor[] stop_gradients = null) var npgrad2 = result[1]; foreach (var (a, b) in npgrad1.Zip(npgrad2)) { - // TODO: np.testing.assert_allclose(a, b); - CollectionAssert.AreEqual(a.ToArray(), b.ToArray(), new CollectionComparer()); + self.assertAllClose(a, b); } } } diff --git a/test/Tensorflow.UnitTest/PythonTest.cs b/test/Tensorflow.UnitTest/PythonTest.cs index b2412ea9f..650f70f2c 100644 --- a/test/Tensorflow.UnitTest/PythonTest.cs +++ b/test/Tensorflow.UnitTest/PythonTest.cs @@ -185,9 +185,9 @@ public void assertProtoEquals(object toProto, object o) #region tensor evaluation and test session - private Session _cached_session = null; - private Graph _cached_graph = null; - private object _cached_config = null; + private Session? _cached_session = null; + private Graph? _cached_graph = null; + private object? _cached_config = null; private bool _cached_force_gpu = false; private void _ClearCachedSession() @@ -237,7 +237,7 @@ protected object _eval_tensor(object tensor) /// public T evaluate(Tensor tensor) { - object result = null; + object? result = null; // if context.executing_eagerly(): // return self._eval_helper(tensors) // else: @@ -274,7 +274,7 @@ public T evaluate(Tensor tensor) ///Returns a TensorFlow Session for use in executing tests. public Session cached_session( - Graph graph = null, object config = null, bool use_gpu = false, bool force_gpu = false) + Graph? graph = null, object? config = null, bool use_gpu = false, bool force_gpu = false) { // This method behaves differently than self.session(): for performance reasons // `cached_session` will by default reuse the same session within the same @@ -325,7 +325,7 @@ public Session cached_session( } //Returns a TensorFlow Session for use in executing tests. - public Session session(Graph graph = null, object config = null, bool use_gpu = false, bool force_gpu = false) + public Session session(Graph? graph = null, object? config = null, bool use_gpu = false, bool force_gpu = false) { //Note that this will set this session and the graph as global defaults. @@ -359,7 +359,7 @@ public Session session(Graph graph = null, object config = null, bool use_gpu = // A Session object that should be used as a context manager to surround // the graph building and execution code in a test case. - Session s = null; + Session? s = null; //if (context.executing_eagerly()) // yield None //else @@ -448,8 +448,8 @@ private Session _create_session(Graph graph, object cfg, bool forceGpu) } private Session _get_cached_session( - Graph graph = null, - object config = null, + Graph? graph = null, + object? 
config = null, bool force_gpu = false, bool crash_if_inconsistent_args = true) { From b906c9a69a15ad413f519db741335bdb1aedf07a Mon Sep 17 00:00:00 2001 From: Alexander Date: Fri, 10 Nov 2023 21:16:42 +0000 Subject: [PATCH 164/182] fix nullability --- .../Tensorflow.Keras.UnitTest.csproj | 1 + test/Tensorflow.UnitTest/PythonTest.cs | 29 ++++++++++++++----- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj index 3910eba1c..e8b8d42b3 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj +++ b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj @@ -25,6 +25,7 @@ + diff --git a/test/Tensorflow.UnitTest/PythonTest.cs b/test/Tensorflow.UnitTest/PythonTest.cs index 650f70f2c..5d1b1e0e1 100644 --- a/test/Tensorflow.UnitTest/PythonTest.cs +++ b/test/Tensorflow.UnitTest/PythonTest.cs @@ -86,9 +86,9 @@ public void assertEqual(object given, object expected) Assert.AreEqual(JObject.FromObject(expected).ToString(), JObject.FromObject(given).ToString()); return; } - if (given is ICollection && expected is ICollection) + if (given is ICollection collectionGiven && expected is ICollection collectionExpected) { - assertItemsEqual(given as ICollection, expected as ICollection); + assertItemsEqual(collectionGiven, collectionExpected); return; } if (given is float && expected is float) @@ -150,8 +150,21 @@ public CollectionComparer(double eps = 1e-06) { _epsilon = eps; } - public int Compare(object x, object y) + public int Compare(object? x, object? y) { + if (x == null && y == null) + { + return 0; + } + else if (x == null) + { + return -1; + } + else if (y == null) + { + return 1; + } + var a = (double)x; var b = (double)y; @@ -206,7 +219,7 @@ private void _ClearCachedSession() // return nest.map_structure(self._eval_tensor, tensors); //} - protected object _eval_tensor(object tensor) + protected object? _eval_tensor(object tensor) { if (tensor == null) return None; @@ -273,7 +286,7 @@ public T evaluate(Tensor tensor) ///Returns a TensorFlow Session for use in executing tests. - public Session cached_session( + public Session? cached_session( Graph? graph = null, object? config = null, bool use_gpu = false, bool force_gpu = false) { // This method behaves differently than self.session(): for performance reasons @@ -369,7 +382,7 @@ public Session session(Graph? graph = null, object? config = null, bool use_gpu return s.as_default(); } - private Session _constrain_devices_and_set_default(Session sess, bool use_gpu, bool force_gpu) + private Session? _constrain_devices_and_set_default(Session sess, bool use_gpu, bool force_gpu) { // Set the session and its graph to global default and constrain devices.""" if (tf.executing_eagerly()) @@ -404,7 +417,7 @@ private Session _constrain_devices_and_set_default(Session sess, bool use_gpu, b } // See session() for details. - private Session _create_session(Graph graph, object cfg, bool forceGpu) + private Session _create_session(Graph? graph, object? cfg, bool forceGpu) { var prepare_config = new Func((config) => { @@ -485,7 +498,7 @@ different than the one that was used to create the session. 
Maybe create a new session with self.session()"); } - return _cached_session; + return self._cached_session; } } From b6db9410b3c66ad30ac900330708060231e39809 Mon Sep 17 00:00:00 2001 From: Alexander Date: Fri, 10 Nov 2023 21:20:13 +0000 Subject: [PATCH 165/182] update packages --- .../TensorFlow.Kernel.UnitTest.csproj | 2 +- .../TensorFlowNET.Graph.UnitTest.csproj | 2 +- .../Tensorflow.Keras.UnitTest.csproj | 2 +- .../Tensorflow.Native.UnitTest.csproj | 2 +- test/Tensorflow.UnitTest/Tensorflow.UnitTest.csproj | 4 ++-- .../TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj b/test/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj index 21b2731b7..461993408 100644 --- a/test/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj +++ b/test/TensorFlow.Kernel.UnitTest/TensorFlow.Kernel.UnitTest.csproj @@ -10,7 +10,7 @@ - + diff --git a/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj b/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj index 74663c1cb..40dd53f74 100644 --- a/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj +++ b/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj @@ -24,7 +24,7 @@ - + diff --git a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj index e8b8d42b3..edac1c2ff 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj +++ b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj @@ -13,7 +13,7 @@ - + diff --git a/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj b/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj index a4f1ec567..c054a8707 100644 --- a/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj +++ b/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj @@ -44,7 +44,7 @@ - + diff --git a/test/Tensorflow.UnitTest/Tensorflow.UnitTest.csproj b/test/Tensorflow.UnitTest/Tensorflow.UnitTest.csproj index 66a7d63bd..9ad6bc7a5 100644 --- a/test/Tensorflow.UnitTest/Tensorflow.UnitTest.csproj +++ b/test/Tensorflow.UnitTest/Tensorflow.UnitTest.csproj @@ -1,4 +1,4 @@ - + net6.0 @@ -10,7 +10,7 @@ - + diff --git a/test/TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj b/test/TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj index 4c3918e4a..c93b89256 100644 --- a/test/TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj +++ b/test/TensorflowNET.Hub.Unittest/Tensorflow.Hub.Unittest.csproj @@ -9,7 +9,7 @@ - + From 7968dc360fbcbb57265e8a49192c8b028e9d0196 Mon Sep 17 00:00:00 2001 From: Alexander Date: Sat, 11 Nov 2023 05:54:38 +0000 Subject: [PATCH 166/182] fix test --- test/Tensorflow.UnitTest/PythonTest.cs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/test/Tensorflow.UnitTest/PythonTest.cs b/test/Tensorflow.UnitTest/PythonTest.cs index 5d1b1e0e1..dff652933 100644 --- a/test/Tensorflow.UnitTest/PythonTest.cs +++ b/test/Tensorflow.UnitTest/PythonTest.cs @@ -133,13 +133,23 @@ public void assertTrue(bool cond) public void assertAllClose(NDArray array1, NDArray array2, double eps = 1e-5) { - Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); + CollectionAssert.AreEqual(array1.ToArray(), array2.ToArray(), new CollectionComparer(eps)); + + //TODO: Assert.IsTrue(np.allclose(array1, array2, rtol: 
eps)); } public void assertAllClose(double value, NDArray array2, double eps = 1e-5) { + if (array2.shape.IsScalar) + { + double value2 = array2; + Assert.AreEqual(value, value2, eps); + return; + } var array1 = np.ones_like(array2) * value; - Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); + CollectionAssert.AreEqual(array1.ToArray(), array2.ToArray(), new CollectionComparer(eps)); + + //TODO: Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); } private class CollectionComparer : IComparer @@ -158,7 +168,7 @@ public int Compare(object? x, object? y) } else if (x == null) { - return -1; + return -1; } else if (y == null) { From d54f7a62e0e66dee73eff78ce5c93acb195ce813 Mon Sep 17 00:00:00 2001 From: Alexander Date: Mon, 13 Nov 2023 10:33:14 +0000 Subject: [PATCH 167/182] test: more gradients tests --- .../Training/GradientDescentOptimizerTests.cs | 113 ++++++++++++++++++ test/Tensorflow.UnitTest/PythonTest.cs | 45 +++++-- 2 files changed, 149 insertions(+), 9 deletions(-) diff --git a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs index d766890b2..f7062f00d 100644 --- a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs +++ b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs @@ -1,5 +1,6 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; using System; +using System.Linq; using Tensorflow; using Tensorflow.NumPy; using static Tensorflow.Binding; @@ -67,6 +68,51 @@ public void TestBasic() TestBasic(); } + private void TestMinimizeResourceVariable() where T : struct + { + var dtype = GetTypeForNumericType(); + + // train.GradientDescentOptimizer is V1 only API. + tf.Graph().as_default(); + using (var sess = self.cached_session()) + { + var var0 = tf.Variable(new[,] { { 1.0f, 2.0f } }, dtype: dtype); + var var1 = tf.Variable(new[] { 3.0 }, dtype: dtype); + var x = tf.constant(new[,] { { 4.0f }, { 5.0f } }, dtype: dtype); + + var pred = math_ops.matmul(var0, x) + var1; + var loss = pred * pred; + var sgd_op = tf.train.GradientDescentOptimizer(3.0f).minimize(loss); + + var global_variables = tf.global_variables_initializer(); + sess.run(global_variables); + + sess.run(new[] { var0, var1 }); + // Fetch params to validate initial values + self.assertAllCloseAccordingToType(new[,] { { 1.0, 2.0 } }, self.evaluate(var0)); + self.assertAllCloseAccordingToType(new[] { 3.0 }, self.evaluate(var1)); + // Run 1 step of sgd + sgd_op.run(); + // Validate updated params + var np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0; + var np_grad = 2 * np_pred; + self.assertAllCloseAccordingToType( + new[,] { { 1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0 } }, + self.evaluate(var0)); + self.assertAllCloseAccordingToType( + new[] { 3.0 - np_grad }, + self.evaluate(var1)); + } + } + + [TestMethod] + public void TestMinimizeResourceVariable() + { + //TODO: add np.half + TestMinimizeResourceVariable(); + TestMinimizeResourceVariable(); + } + private void TestTensorLearningRate() where T : struct { var dtype = GetTypeForNumericType(); @@ -115,5 +161,72 @@ public void TestTensorLearningRate() TestTensorLearningRate(); TestTensorLearningRate(); } + + public void TestGradWrtRef() where T : struct + { + var dtype = GetTypeForNumericType(); + + var graph = tf.Graph().as_default(); + using (var sess = self.cached_session()) + { + var opt = tf.train.GradientDescentOptimizer(3.0f); + var values = new[] { 1.0, 3.0 }; + var vars_ = values.Select( + v => tf.Variable(new[] { v }, dtype: dtype) as 
IVariableV1 + ).ToList(); + var grads_and_vars = opt.compute_gradients(tf.add(vars_[0], vars_[1]), vars_); + sess.run(tf.global_variables_initializer()); + foreach (var (grad, _) in grads_and_vars) + self.assertAllCloseAccordingToType(new[] { 1.0 }, self.evaluate(grad)); + + } + } + + [TestMethod] + public void TestGradWrtRef() + { + TestGradWrtRef(); + TestGradWrtRef(); + } + + public void TestWithGlobalStep() where T : struct + { + var dtype = GetTypeForNumericType(); + + tf.Graph().as_default(); + using (var sess = self.cached_session()) + { + var global_step = tf.Variable(0, trainable: false); + var var0 = tf.Variable(new[] { 1.0, 2.0 }, dtype: dtype); + var var1 = tf.Variable(new[] { 3.0, 4.0 }, dtype: dtype); + var grads0 = tf.constant(new[] { 0.1, 0.1 }, dtype: dtype); + var grads1 = tf.constant(new[] { 0.01, 0.01 }, dtype: dtype); + var grads_and_vars = new[] { + Tuple.Create(grads0, var0 as IVariableV1), + Tuple.Create(grads1, var1 as IVariableV1) + }; + var sgd_op = tf.train.GradientDescentOptimizer(3.0f) + .apply_gradients(grads_and_vars, global_step: global_step); + + sess.run(tf.global_variables_initializer()); + // Fetch params to validate initial values + self.assertAllCloseAccordingToType(new[] { 1.0, 2.0 }, self.evaluate(var0)); + self.assertAllCloseAccordingToType(new[] { 3.0, 4.0 }, self.evaluate(var1)); + // Run 1 step of sgd + sgd_op.run(); + // Validate updated params and global_step + self.assertAllCloseAccordingToType(new[] { 1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1 }, self.evaluate(var0)); + self.assertAllCloseAccordingToType(new[] { 3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01 }, self.evaluate(var1)); + Assert.AreEqual(1, self.evaluate(global_step)); + } + + } + + [TestMethod] + public void TestWithGlobalStep() + { + TestWithGlobalStep(); + TestWithGlobalStep(); + } } } diff --git a/test/Tensorflow.UnitTest/PythonTest.cs b/test/Tensorflow.UnitTest/PythonTest.cs index dff652933..1ccd39f02 100644 --- a/test/Tensorflow.UnitTest/PythonTest.cs +++ b/test/Tensorflow.UnitTest/PythonTest.cs @@ -175,8 +175,8 @@ public int Compare(object? x, object? y) return 1; } - var a = (double)x; - var b = (double)y; + var a = Convert.ToDouble(x); + var b = Convert.ToDouble(y); double delta = Math.Abs(a - b); if (delta < _epsilon) @@ -187,6 +187,19 @@ public int Compare(object? x, object? 
y) } } + public void assertAllCloseAccordingToType( + double[,] expected, + T[,] given, + double eps = 1e-6, + float float_eps = 1e-6f) + { + Assert.AreEqual(expected.GetLength(0), given.GetLength(0)); + Assert.AreEqual(expected.GetLength(1), given.GetLength(1)); + + var flattenGiven = given.Cast().ToArray(); + assertAllCloseAccordingToType(expected, flattenGiven, eps, float_eps); + } + public void assertAllCloseAccordingToType( ICollection expected, ICollection given, @@ -267,21 +280,35 @@ public T evaluate(Tensor tensor) { var sess = tf.get_default_session(); var ndarray = tensor.eval(sess); - if (typeof(T) == typeof(double) - || typeof(T) == typeof(float) - || typeof(T) == typeof(int)) + + if (typeof(T) == typeof(int)) + { + int i = ndarray; + result = i; + } + else if (typeof(T) == typeof(float)) + { + float f = ndarray; + result = f; + } + else if (typeof(T) == typeof(double)) { - result = Convert.ChangeType(ndarray, typeof(T)); + double d = ndarray; + result = d; } - else if (typeof(T) == typeof(double[])) + else if ( + typeof(T) == typeof(double[]) + || typeof(T) == typeof(double[,])) { result = ndarray.ToMultiDimArray(); } - else if (typeof(T) == typeof(float[])) + else if (typeof(T) == typeof(float[]) + || typeof(T) == typeof(float[,])) { result = ndarray.ToMultiDimArray(); } - else if (typeof(T) == typeof(int[])) + else if (typeof(T) == typeof(int[]) + || typeof(T) == typeof(int[,])) { result = ndarray.ToMultiDimArray(); } From eb0f02577290d930930349870b161e85553e967a Mon Sep 17 00:00:00 2001 From: barfeous Date: Mon, 12 Feb 2024 13:28:54 -0600 Subject: [PATCH 168/182] avoid modifying collection --- .../Training/Saving/SavedModel/AugmentedGraphView.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Core/Training/Saving/SavedModel/AugmentedGraphView.cs b/src/TensorFlowNET.Core/Training/Saving/SavedModel/AugmentedGraphView.cs index a91933357..c6b26ff49 100644 --- a/src/TensorFlowNET.Core/Training/Saving/SavedModel/AugmentedGraphView.cs +++ b/src/TensorFlowNET.Core/Training/Saving/SavedModel/AugmentedGraphView.cs @@ -88,7 +88,7 @@ private ConcreteFunction maybe_uncache_variable_captures(ConcreteFunction concre public override (IList, IDictionary>) breadth_first_traversal() { - Trackable get_merged_trackable(Trackable x) + void merged_trackable(Trackable x) { // TODO: complete it with new definitions `Asset` and `TrackableConstant`. return x; @@ -100,7 +100,7 @@ Trackable get_merged_trackable(Trackable x) // skip the deletion of cache (maybe do it later). 
foreach(var pair in _children_cache[obj]) { - _children_cache[obj][pair.Key] = get_merged_trackable(pair.Value); + merged_trackable(pair.Value); } } From 3448b6434680270026a0f938e913ff1f08f1df9b Mon Sep 17 00:00:00 2001 From: barfeous Date: Wed, 14 Feb 2024 20:25:15 -0600 Subject: [PATCH 169/182] Remove parameter return from newly void local method --- .../Training/Saving/SavedModel/AugmentedGraphView.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Core/Training/Saving/SavedModel/AugmentedGraphView.cs b/src/TensorFlowNET.Core/Training/Saving/SavedModel/AugmentedGraphView.cs index c6b26ff49..3b4bbdc63 100644 --- a/src/TensorFlowNET.Core/Training/Saving/SavedModel/AugmentedGraphView.cs +++ b/src/TensorFlowNET.Core/Training/Saving/SavedModel/AugmentedGraphView.cs @@ -91,8 +91,8 @@ public override (IList, IDictionary Date: Mon, 11 Mar 2024 03:05:42 +0800 Subject: [PATCH 170/182] docs: update README.md --- README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.md b/README.md index 0198c873c..75cad0aa7 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,14 @@ English | [中文](docs/README-CN.md) +> [!IMPORTANT] +> We're happy that our work on tensorflow.net has attracted many users. However, at this time, none of the main maintainers of this repo is available for new features and bug fix. We won't refuse PRs and will help to review them. +> +> If you would like to be a contributor or maintainer of tensorflow.net, we'd like to help you to start up. +> +> We feel sorry for that and we'll resume the maintaining for this project once one of us has bandwidth for it. +> + *master branch and v0.100.x is corresponding to tensorflow v2.10, v0.6x branch is from tensorflow v2.6, v0.15-tensorflow1.15 is from tensorflow1.15. 
Please add `https://www.myget.org/F/scisharp/api/v3/index.json` to nuget source to use nightly release.* From 4a31621a5632c7d6b2ebca1d36561458b91367c5 Mon Sep 17 00:00:00 2001 From: barfeous Date: Sun, 28 Apr 2024 13:04:07 -0500 Subject: [PATCH 171/182] Use TryGetValue instead of ContainsKey + [] --- .../Training/Saving/SavedModel/AugmentedGraphView.cs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/TensorFlowNET.Core/Training/Saving/SavedModel/AugmentedGraphView.cs b/src/TensorFlowNET.Core/Training/Saving/SavedModel/AugmentedGraphView.cs index 3b4bbdc63..9d0b3f001 100644 --- a/src/TensorFlowNET.Core/Training/Saving/SavedModel/AugmentedGraphView.cs +++ b/src/TensorFlowNET.Core/Training/Saving/SavedModel/AugmentedGraphView.cs @@ -109,15 +109,11 @@ void merged_trackable(Trackable x) public List<(string, Trackable)> list_dependencies(Trackable obj) { - IDictionary children; - if (!_children_cache.ContainsKey(obj)) + if (!_children_cache.TryGetValue(obj, out var children)) { children= new Dictionary(); } - else - { - children= _children_cache[obj]; - } + List<(string, Trackable)> res = new(); foreach(var pair in obj.deserialization_dependencies(children)) { From f5ba382e49ab0132308739c219ea09b6ac254223 Mon Sep 17 00:00:00 2001 From: Schoen Tannenbaum <169845314+SchoenTannenbaum@users.noreply.github.com> Date: Mon, 20 May 2024 12:09:06 -0400 Subject: [PATCH 172/182] Regularizer addition and fixes --- .../Keras/Regularizers/IRegularizer.cs | 17 ++++-- .../CustomizedRegularizerJsonConverter.cs | 57 +++++++++++++++++++ .../Operations/Regularizers/L1.cs | 33 +++++++++++ .../Operations/Regularizers/L1L2.cs | 48 ++++++++++++++++ .../Operations/Regularizers/L2.cs | 33 +++++++++++ src/TensorFlowNET.Keras/Regularizers.cs | 19 +++++-- src/TensorFlowNET.Keras/Regularizers/L1.cs | 19 ------- src/TensorFlowNET.Keras/Regularizers/L1L2.cs | 24 -------- src/TensorFlowNET.Keras/Regularizers/L2.cs | 17 ------ 9 files changed, 198 insertions(+), 69 deletions(-) create mode 100644 src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedRegularizerJsonConverter.cs create mode 100644 src/TensorFlowNET.Core/Operations/Regularizers/L1.cs create mode 100644 src/TensorFlowNET.Core/Operations/Regularizers/L1L2.cs create mode 100644 src/TensorFlowNET.Core/Operations/Regularizers/L2.cs delete mode 100644 src/TensorFlowNET.Keras/Regularizers/L1.cs delete mode 100644 src/TensorFlowNET.Keras/Regularizers/L1L2.cs delete mode 100644 src/TensorFlowNET.Keras/Regularizers/L2.cs diff --git a/src/TensorFlowNET.Core/Keras/Regularizers/IRegularizer.cs b/src/TensorFlowNET.Core/Keras/Regularizers/IRegularizer.cs index f4045c7b2..e5de76ddb 100644 --- a/src/TensorFlowNET.Core/Keras/Regularizers/IRegularizer.cs +++ b/src/TensorFlowNET.Core/Keras/Regularizers/IRegularizer.cs @@ -1,7 +1,16 @@ -namespace Tensorflow.Keras +using Newtonsoft.Json; +using System.Collections.Generic; +using Tensorflow.Keras.Saving.Common; + +namespace Tensorflow.Keras { - public interface IRegularizer - { - Tensor Apply(RegularizerArgs args); + [JsonConverter(typeof(CustomizedRegularizerJsonConverter))] + public interface IRegularizer + { + [JsonProperty("class_name")] + string ClassName { get; } + [JsonProperty("config")] + IDictionary Config { get; } + Tensor Apply(RegularizerArgs args); } } diff --git a/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedRegularizerJsonConverter.cs b/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedRegularizerJsonConverter.cs new file mode 100644 index 000000000..4b1790aca --- /dev/null +++ 
b/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedRegularizerJsonConverter.cs @@ -0,0 +1,57 @@ +using Newtonsoft.Json.Linq; +using Newtonsoft.Json; +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Operations.Regularizers; + +namespace Tensorflow.Keras.Saving.Common +{ + class RegularizerInfo + { + public string class_name { get; set; } + public JObject config { get; set; } + } + + public class CustomizedRegularizerJsonConverter : JsonConverter + { + public override bool CanConvert(Type objectType) + { + return objectType == typeof(IRegularizer); + } + + public override bool CanRead => true; + + public override bool CanWrite => true; + + public override void WriteJson(JsonWriter writer, object? value, JsonSerializer serializer) + { + var regularizer = value as IRegularizer; + if (regularizer is null) + { + JToken.FromObject(null).WriteTo(writer); + return; + } + JToken.FromObject(new RegularizerInfo() + { + class_name = regularizer.ClassName, + config = JObject.FromObject(regularizer.Config) + }, serializer).WriteTo(writer); + } + + public override object? ReadJson(JsonReader reader, Type objectType, object? existingValue, JsonSerializer serializer) + { + var info = serializer.Deserialize(reader); + if (info is null) + { + return null; + } + return info.class_name switch + { + "L1L2" => new L1L2 (info.config["l1"].ToObject(), info.config["l2"].ToObject()), + "L1" => new L1(info.config["l1"].ToObject()), + "L2" => new L2(info.config["l2"].ToObject()), + }; + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/Regularizers/L1.cs b/src/TensorFlowNET.Core/Operations/Regularizers/L1.cs new file mode 100644 index 000000000..8a5c68895 --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/Regularizers/L1.cs @@ -0,0 +1,33 @@ +using System; + +using Tensorflow.Keras; + +namespace Tensorflow.Operations.Regularizers +{ + public class L1 : IRegularizer + { + float _l1; + private readonly Dictionary _config; + + public string ClassName => "L2"; + public virtual IDictionary Config => _config; + + public L1(float l1 = 0.01f) + { + // l1 = 0.01 if l1 is None else l1 + // validate_float_arg(l1, name = "l1") + // self.l1 = ops.convert_to_tensor(l1) + this._l1 = l1; + + _config = new(); + _config["l1"] = _l1; + } + + + public Tensor Apply(RegularizerArgs args) + { + //return self.l1 * ops.sum(ops.absolute(x)) + return _l1 * math_ops.reduce_sum(math_ops.abs(args.X)); + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/Regularizers/L1L2.cs b/src/TensorFlowNET.Core/Operations/Regularizers/L1L2.cs new file mode 100644 index 000000000..e3af00eb5 --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/Regularizers/L1L2.cs @@ -0,0 +1,48 @@ +using System; + +using Tensorflow.Keras; + +namespace Tensorflow.Operations.Regularizers +{ + public class L1L2 : IRegularizer + { + float _l1; + float _l2; + private readonly Dictionary _config; + + public string ClassName => "L1L2"; + public virtual IDictionary Config => _config; + + public L1L2(float l1 = 0.0f, float l2 = 0.0f) + { + //l1 = 0.0 if l1 is None else l1 + //l2 = 0.0 if l2 is None else l2 + // validate_float_arg(l1, name = "l1") + // validate_float_arg(l2, name = "l2") + + // self.l1 = l1 + // self.l2 = l2 + this._l1 = l1; + this._l2 = l2; + + _config = new(); + _config["l1"] = l1; + _config["l2"] = l2; + } + + public Tensor Apply(RegularizerArgs args) + { + //regularization = ops.convert_to_tensor(0.0, dtype = x.dtype) + //if self.l1: + // regularization += self.l1 * ops.sum(ops.absolute(x)) + //if self.l2: + // 
regularization += self.l2 * ops.sum(ops.square(x)) + //return regularization + + Tensor regularization = tf.constant(0.0, args.X.dtype); + regularization += _l1 * math_ops.reduce_sum(math_ops.abs(args.X)); + regularization += _l2 * math_ops.reduce_sum(math_ops.square(args.X)); + return regularization; + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/Regularizers/L2.cs b/src/TensorFlowNET.Core/Operations/Regularizers/L2.cs new file mode 100644 index 000000000..6c0e950a9 --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/Regularizers/L2.cs @@ -0,0 +1,33 @@ +using System; + +using Tensorflow.Keras; + +namespace Tensorflow.Operations.Regularizers +{ + public class L2 : IRegularizer + { + float _l2; + private readonly Dictionary _config; + + public string ClassName => "L2"; + public virtual IDictionary Config => _config; + + public L2(float l2 = 0.01f) + { + // l2 = 0.01 if l2 is None else l2 + // validate_float_arg(l2, name = "l2") + // self.l2 = l2 + this._l2 = l2; + + _config = new(); + _config["l2"] = _l2; + } + + + public Tensor Apply(RegularizerArgs args) + { + //return self.l2 * ops.sum(ops.square(x)) + return _l2 * math_ops.reduce_sum(math_ops.square(args.X)); + } + } +} diff --git a/src/TensorFlowNET.Keras/Regularizers.cs b/src/TensorFlowNET.Keras/Regularizers.cs index 98da27a7f..9c6d07ca6 100644 --- a/src/TensorFlowNET.Keras/Regularizers.cs +++ b/src/TensorFlowNET.Keras/Regularizers.cs @@ -1,8 +1,17 @@ namespace Tensorflow.Keras { - public class Regularizers - { - public IRegularizer l2(float l2 = 0.01f) - => new L2(l2); - } + public class Regularizers + { + public IRegularizer l1(float l1 = 0.01f) + => new Tensorflow.Operations.Regularizers.L1(l1); + public IRegularizer l2(float l2 = 0.01f) + => new Tensorflow.Operations.Regularizers.L2(l2); + + //From TF source + //# The default value for l1 and l2 are different from the value in l1_l2 + //# for backward compatibility reason. Eg, L1L2(l2=0.1) will only have l2 + //# and no l1 penalty. 
+ public IRegularizer l1l2(float l1 = 0.00f, float l2 = 0.00f) + => new Tensorflow.Operations.Regularizers.L1L2(l1, l2); + } } diff --git a/src/TensorFlowNET.Keras/Regularizers/L1.cs b/src/TensorFlowNET.Keras/Regularizers/L1.cs deleted file mode 100644 index 0f904b6f9..000000000 --- a/src/TensorFlowNET.Keras/Regularizers/L1.cs +++ /dev/null @@ -1,19 +0,0 @@ -using System; - -namespace Tensorflow.Keras -{ - public class L1 : IRegularizer - { - float l1; - - public L1(float l1 = 0.01f) - { - this.l1 = l1; - } - - public Tensor Apply(RegularizerArgs args) - { - return l1 * math_ops.reduce_sum(math_ops.abs(args.X)); - } - } -} diff --git a/src/TensorFlowNET.Keras/Regularizers/L1L2.cs b/src/TensorFlowNET.Keras/Regularizers/L1L2.cs deleted file mode 100644 index f619f1582..000000000 --- a/src/TensorFlowNET.Keras/Regularizers/L1L2.cs +++ /dev/null @@ -1,24 +0,0 @@ -using System; -using static Tensorflow.Binding; -namespace Tensorflow.Keras -{ - public class L1L2 : IRegularizer - { - float l1; - float l2; - - public L1L2(float l1 = 0.0f, float l2 = 0.0f) - { - this.l1 = l1; - this.l2 = l2; - - } - public Tensor Apply(RegularizerArgs args) - { - Tensor regularization = tf.constant(0.0, args.X.dtype); - regularization += l1 * math_ops.reduce_sum(math_ops.abs(args.X)); - regularization += l2 * math_ops.reduce_sum(math_ops.square(args.X)); - return regularization; - } - } -} diff --git a/src/TensorFlowNET.Keras/Regularizers/L2.cs b/src/TensorFlowNET.Keras/Regularizers/L2.cs deleted file mode 100644 index 034bbd236..000000000 --- a/src/TensorFlowNET.Keras/Regularizers/L2.cs +++ /dev/null @@ -1,17 +0,0 @@ -namespace Tensorflow.Keras -{ - public class L2 : IRegularizer - { - float l2; - - public L2(float l2 = 0.01f) - { - this.l2 = l2; - } - - public Tensor Apply(RegularizerArgs args) - { - return l2 * math_ops.reduce_sum(math_ops.square(args.X)); - } - } -} From 5f9fce572d07768de9c1386bf29264a345e16c8c Mon Sep 17 00:00:00 2001 From: Schoen Tannenbaum <169845314+SchoenTannenbaum@users.noreply.github.com> Date: Mon, 20 May 2024 12:10:09 -0400 Subject: [PATCH 173/182] RegularizerAPI and UnitTest --- .../Keras/Regularizers/IRegularizer.cs | 11 ++++- .../Operations/Regularizers/L1.cs | 2 +- src/TensorFlowNET.Keras/Regularizers.cs | 44 +++++++++++++++-- .../Model/ModelLoadTest.cs | 48 +++++++++++++++++++ 4 files changed, 98 insertions(+), 7 deletions(-) diff --git a/src/TensorFlowNET.Core/Keras/Regularizers/IRegularizer.cs b/src/TensorFlowNET.Core/Keras/Regularizers/IRegularizer.cs index e5de76ddb..06dbb7c8c 100644 --- a/src/TensorFlowNET.Core/Keras/Regularizers/IRegularizer.cs +++ b/src/TensorFlowNET.Core/Keras/Regularizers/IRegularizer.cs @@ -12,5 +12,14 @@ public interface IRegularizer [JsonProperty("config")] IDictionary Config { get; } Tensor Apply(RegularizerArgs args); - } + } + + public interface IRegularizerApi + { + IRegularizer GetRegularizerFromName(string name); + IRegularizer L1 { get; } + IRegularizer L2 { get; } + IRegularizer L1L2 { get; } + } + } diff --git a/src/TensorFlowNET.Core/Operations/Regularizers/L1.cs b/src/TensorFlowNET.Core/Operations/Regularizers/L1.cs index 8a5c68895..9e0619454 100644 --- a/src/TensorFlowNET.Core/Operations/Regularizers/L1.cs +++ b/src/TensorFlowNET.Core/Operations/Regularizers/L1.cs @@ -9,7 +9,7 @@ public class L1 : IRegularizer float _l1; private readonly Dictionary _config; - public string ClassName => "L2"; + public string ClassName => "L1"; public virtual IDictionary Config => _config; public L1(float l1 = 0.01f) diff --git 
a/src/TensorFlowNET.Keras/Regularizers.cs b/src/TensorFlowNET.Keras/Regularizers.cs index 9c6d07ca6..73b72a051 100644 --- a/src/TensorFlowNET.Keras/Regularizers.cs +++ b/src/TensorFlowNET.Keras/Regularizers.cs @@ -1,17 +1,51 @@ -namespace Tensorflow.Keras +using Tensorflow.Operations.Regularizers; + +namespace Tensorflow.Keras { - public class Regularizers + public class Regularizers: IRegularizerApi { + private static Dictionary _nameActivationMap; + public IRegularizer l1(float l1 = 0.01f) - => new Tensorflow.Operations.Regularizers.L1(l1); + => new L1(l1); public IRegularizer l2(float l2 = 0.01f) - => new Tensorflow.Operations.Regularizers.L2(l2); + => new L2(l2); //From TF source //# The default value for l1 and l2 are different from the value in l1_l2 //# for backward compatibility reason. Eg, L1L2(l2=0.1) will only have l2 //# and no l1 penalty. public IRegularizer l1l2(float l1 = 0.00f, float l2 = 0.00f) - => new Tensorflow.Operations.Regularizers.L1L2(l1, l2); + => new L1L2(l1, l2); + + static Regularizers() + { + _nameActivationMap = new Dictionary(); + _nameActivationMap["L1"] = new L1(); + _nameActivationMap["L1"] = new L2(); + _nameActivationMap["L1"] = new L1L2(); + } + + public IRegularizer L1 => l1(); + + public IRegularizer L2 => l2(); + + public IRegularizer L1L2 => l1l2(); + + public IRegularizer GetRegularizerFromName(string name) + { + if (name == null) + { + throw new Exception($"Regularizer name cannot be null"); + } + if (!_nameActivationMap.TryGetValue(name, out var res)) + { + throw new Exception($"Regularizer {name} not found"); + } + else + { + return res; + } + } } } diff --git a/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs b/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs index 53a67cbfa..c733537e7 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Model/ModelLoadTest.cs @@ -1,6 +1,7 @@ using Microsoft.VisualStudio.TestPlatform.Utilities; using Microsoft.VisualStudio.TestTools.UnitTesting; using Newtonsoft.Json.Linq; +using System.Collections.Generic; using System.Linq; using System.Xml.Linq; using Tensorflow.Keras.Engine; @@ -129,6 +130,53 @@ public void TestModelBeforeTF2_5() } + [TestMethod] + public void BiasRegularizerSaveAndLoad() + { + var savemodel = keras.Sequential(new List() + { + tf.keras.layers.InputLayer((227, 227, 3)), + tf.keras.layers.Conv2D(96, (11, 11), (4, 4), activation:"relu", padding:"valid"), + tf.keras.layers.BatchNormalization(), + tf.keras.layers.MaxPooling2D((3, 3), strides:(2, 2)), + + tf.keras.layers.Conv2D(256, (5, 5), (1, 1), "same", activation: keras.activations.Relu, bias_regularizer:keras.regularizers.L1L2), + tf.keras.layers.BatchNormalization(), + + tf.keras.layers.Conv2D(256, (5, 5), (1, 1), "same", activation: keras.activations.Relu, bias_regularizer:keras.regularizers.L2), + tf.keras.layers.BatchNormalization(), + + tf.keras.layers.Conv2D(256, (5, 5), (1, 1), "same", activation: keras.activations.Relu, bias_regularizer:keras.regularizers.L1), + tf.keras.layers.BatchNormalization(), + tf.keras.layers.MaxPooling2D((3, 3), (2, 2)), + + tf.keras.layers.Flatten(), + + tf.keras.layers.Dense(1000, activation: "linear"), + tf.keras.layers.Softmax(1) + }); + + savemodel.compile(tf.keras.optimizers.Adam(), tf.keras.losses.SparseCategoricalCrossentropy(from_logits: true), new string[] { "accuracy" }); + + var num_epochs = 1; + var batch_size = 8; + + var trainDataset = new RandomDataSet(new Shape(227, 227, 3), 16); + + savemodel.fit(trainDataset.Data, 
trainDataset.Labels, batch_size, num_epochs); + + savemodel.save(@"./bias_regularizer_save_and_load", save_format: "tf"); + + var loadModel = tf.keras.models.load_model(@"./bias_regularizer_save_and_load"); + loadModel.summary(); + + loadModel.compile(tf.keras.optimizers.Adam(), tf.keras.losses.SparseCategoricalCrossentropy(from_logits: true), new string[] { "accuracy" }); + + var fitDataset = new RandomDataSet(new Shape(227, 227, 3), 16); + + loadModel.fit(fitDataset.Data, fitDataset.Labels, batch_size, num_epochs); + } + [TestMethod] public void CreateConcatenateModelSaveAndLoad() From b3ce158ec3304469bf776bc582b847e685a9df73 Mon Sep 17 00:00:00 2001 From: novikov-alexander <79649566+novikov-alexander@users.noreply.github.com> Date: Fri, 14 Jun 2024 14:40:06 +0300 Subject: [PATCH 174/182] Update tensor_util.cs --- src/TensorFlowNET.Core/Tensors/tensor_util.cs | 40 +++++++++++++------ 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/src/TensorFlowNET.Core/Tensors/tensor_util.cs b/src/TensorFlowNET.Core/Tensors/tensor_util.cs index f688d4d5d..f2003c9d4 100644 --- a/src/TensorFlowNET.Core/Tensors/tensor_util.cs +++ b/src/TensorFlowNET.Core/Tensors/tensor_util.cs @@ -1,4 +1,4 @@ -/***************************************************************************** +/***************************************************************************** Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); @@ -135,6 +135,23 @@ T[] ExpandArrayToSize(IList src) TF_DataType.TF_QINT32 }; + private static TOut[,] ConvertArray2D(TIn[,] inputArray, Func converter) + { + var rows = inputArray.GetLength(0); + var cols = inputArray.GetLength(1); + var outputArray = new TOut[rows, cols]; + + for (var i = 0; i < rows; i++) + { + for (var j = 0; j < cols; j++) + { + outputArray[i, j] = converter(inputArray[i, j]); + } + } + + return outputArray; + } + /// /// Create a TensorProto, invoked in graph mode /// @@ -157,19 +174,16 @@ public static TensorProto make_tensor_proto(object values, TF_DataType dtype = T else if(origin_dtype != dtype) { var new_system_dtype = dtype.as_system_dtype(); - if (values is long[] long_values) - { - if (dtype == TF_DataType.TF_INT32) - values = long_values.Select(x => (int)Convert.ChangeType(x, new_system_dtype)).ToArray(); - } - else if (values is double[] double_values) + + values = values switch { - if (dtype == TF_DataType.TF_FLOAT) - values = double_values.Select(x => (float)Convert.ChangeType(x, new_system_dtype)).ToArray(); - } - else - values = Convert.ChangeType(values, new_system_dtype); - + long[] longValues when dtype == TF_DataType.TF_INT32 => longValues.Select(x => (int)x).ToArray(), + float[] floatValues when dtype == TF_DataType.TF_DOUBLE => floatValues.Select(x => (double)x).ToArray(), + float[,] float2DValues when dtype == TF_DataType.TF_DOUBLE => ConvertArray2D(float2DValues, Convert.ToDouble), + double[] doubleValues when dtype == TF_DataType.TF_FLOAT => doubleValues.Select(x => (float)x).ToArray(), + double[,] double2DValues when dtype == TF_DataType.TF_DOUBLE => ConvertArray2D(double2DValues, Convert.ToSingle), + _ => Convert.ChangeType(values, new_system_dtype), + }; dtype = values.GetDataType(); } From 18db147eb40a07931e8421bbd63c64ce11edd558 Mon Sep 17 00:00:00 2001 From: novikov-alexander <79649566+novikov-alexander@users.noreply.github.com> Date: Fri, 14 Jun 2024 14:40:37 +0300 Subject: [PATCH 175/182] Update GradientDescentOptimizerTests.cs --- 
.../Training/GradientDescentOptimizerTests.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs index f7062f00d..3b53ff9cd 100644 --- a/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs +++ b/test/TensorFlowNET.UnitTest/Training/GradientDescentOptimizerTests.cs @@ -1,4 +1,4 @@ -using Microsoft.VisualStudio.TestTools.UnitTesting; +using Microsoft.VisualStudio.TestTools.UnitTesting; using System; using System.Linq; using Tensorflow; @@ -82,7 +82,7 @@ private void TestMinimizeResourceVariable() where T : struct var pred = math_ops.matmul(var0, x) + var1; var loss = pred * pred; - var sgd_op = tf.train.GradientDescentOptimizer(3.0f).minimize(loss); + var sgd_op = tf.train.GradientDescentOptimizer(1.0f).minimize(loss); var global_variables = tf.global_variables_initializer(); sess.run(global_variables); From 483ac82cd2db273c2c0520ce6923f5951638daba Mon Sep 17 00:00:00 2001 From: novikov-alexander <79649566+novikov-alexander@users.noreply.github.com> Date: Fri, 14 Jun 2024 15:02:17 +0300 Subject: [PATCH 176/182] Update tensor_util.cs --- src/TensorFlowNET.Core/Tensors/tensor_util.cs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Core/Tensors/tensor_util.cs b/src/TensorFlowNET.Core/Tensors/tensor_util.cs index f2003c9d4..873579e42 100644 --- a/src/TensorFlowNET.Core/Tensors/tensor_util.cs +++ b/src/TensorFlowNET.Core/Tensors/tensor_util.cs @@ -178,10 +178,15 @@ public static TensorProto make_tensor_proto(object values, TF_DataType dtype = T values = values switch { long[] longValues when dtype == TF_DataType.TF_INT32 => longValues.Select(x => (int)x).ToArray(), + long[] longValues => values, float[] floatValues when dtype == TF_DataType.TF_DOUBLE => floatValues.Select(x => (double)x).ToArray(), + float[] floatValues => values, float[,] float2DValues when dtype == TF_DataType.TF_DOUBLE => ConvertArray2D(float2DValues, Convert.ToDouble), + float[,] float2DValues => values, double[] doubleValues when dtype == TF_DataType.TF_FLOAT => doubleValues.Select(x => (float)x).ToArray(), - double[,] double2DValues when dtype == TF_DataType.TF_DOUBLE => ConvertArray2D(double2DValues, Convert.ToSingle), + double[] doubleValues => values, + double[,] double2DValues when dtype == TF_DataType.TF_FLOAT => ConvertArray2D(double2DValues, Convert.ToSingle), + double[,] double2DValues => values, _ => Convert.ChangeType(values, new_system_dtype), }; dtype = values.GetDataType(); From def57745b66d0537cdb70251584c940f327cd929 Mon Sep 17 00:00:00 2001 From: Alexander Novikov Date: Wed, 19 Jun 2024 12:30:38 +0300 Subject: [PATCH 177/182] fix: more generic array cast --- src/TensorFlowNET.Core/Tensors/tensor_util.cs | 88 +++++++++++++------ 1 file changed, 59 insertions(+), 29 deletions(-) diff --git a/src/TensorFlowNET.Core/Tensors/tensor_util.cs b/src/TensorFlowNET.Core/Tensors/tensor_util.cs index 873579e42..6e5024efd 100644 --- a/src/TensorFlowNET.Core/Tensors/tensor_util.cs +++ b/src/TensorFlowNET.Core/Tensors/tensor_util.cs @@ -67,7 +67,7 @@ public static NDArray MakeNdarray(TensorProto tensor) T[] ExpandArrayToSize(IList src) { - if(src.Count == 0) + if (src.Count == 0) { return new T[0]; } @@ -77,7 +77,7 @@ T[] ExpandArrayToSize(IList src) var first_elem = src[0]; var last_elem = src[src.Count - 1]; T[] res = new T[num_elements]; - for(long i = 0; i < num_elements; i++) + for (long i = 0; i < 
num_elements; i++) { if (i < pre) res[i] = first_elem; else if (i >= num_elements - after) res[i] = last_elem; @@ -121,7 +121,7 @@ T[] ExpandArrayToSize(IList src) $"https://www.tensorflow.org/api_docs/python/tf/dtypes for supported TF dtypes."); } - if(values.size == 0) + if (values.size == 0) { return np.zeros(shape, tensor_dtype); } @@ -135,23 +135,47 @@ T[] ExpandArrayToSize(IList src) TF_DataType.TF_QINT32 }; - private static TOut[,] ConvertArray2D(TIn[,] inputArray, Func converter) + private static Array ConvertArray(Array inputArray, Func converter) { - var rows = inputArray.GetLength(0); - var cols = inputArray.GetLength(1); - var outputArray = new TOut[rows, cols]; + if (inputArray == null) + throw new ArgumentNullException(nameof(inputArray)); - for (var i = 0; i < rows; i++) + var elementType = typeof(TOut); + var lengths = new int[inputArray.Rank]; + for (var i = 0; i < inputArray.Rank; i++) { - for (var j = 0; j < cols; j++) - { - outputArray[i, j] = converter(inputArray[i, j]); - } + lengths[i] = inputArray.GetLength(i); } + var outputArray = Array.CreateInstance(elementType, lengths); + + FillArray(inputArray, outputArray, converter, new int[inputArray.Rank], 0); + return outputArray; } + private static void FillArray(Array inputArray, Array outputArray, Func converter, int[] indices, int dimension) + { + if (dimension == inputArray.Rank - 1) + { + for (int i = 0; i < inputArray.GetLength(dimension); i++) + { + indices[dimension] = i; + var inputValue = (TIn)inputArray.GetValue(indices); + var convertedValue = converter(inputValue); + outputArray.SetValue(convertedValue, indices); + } + } + else + { + for (int i = 0; i < inputArray.GetLength(dimension); i++) + { + indices[dimension] = i; + FillArray(inputArray, outputArray, converter, indices, dimension + 1); + } + } + } + /// /// Create a TensorProto, invoked in graph mode /// @@ -171,24 +195,30 @@ public static TensorProto make_tensor_proto(object values, TF_DataType dtype = T var origin_dtype = values.GetDataType(); if (dtype == TF_DataType.DtInvalid) dtype = origin_dtype; - else if(origin_dtype != dtype) + else if (origin_dtype != dtype) { var new_system_dtype = dtype.as_system_dtype(); - - values = values switch + + if (dtype != TF_DataType.TF_STRING && dtype != TF_DataType.TF_VARIANT && dtype != TF_DataType.TF_RESOURCE) + { + if (values is Array arrayValues) + { + values = dtype switch + { + TF_DataType.TF_INT32 => ConvertArray(arrayValues, Convert.ToInt32), + TF_DataType.TF_FLOAT => ConvertArray(arrayValues, Convert.ToSingle), + TF_DataType.TF_DOUBLE => ConvertArray(arrayValues, Convert.ToDouble), + _ => values, + }; + } else + { + values = Convert.ChangeType(values, new_system_dtype); + } + + } else { - long[] longValues when dtype == TF_DataType.TF_INT32 => longValues.Select(x => (int)x).ToArray(), - long[] longValues => values, - float[] floatValues when dtype == TF_DataType.TF_DOUBLE => floatValues.Select(x => (double)x).ToArray(), - float[] floatValues => values, - float[,] float2DValues when dtype == TF_DataType.TF_DOUBLE => ConvertArray2D(float2DValues, Convert.ToDouble), - float[,] float2DValues => values, - double[] doubleValues when dtype == TF_DataType.TF_FLOAT => doubleValues.Select(x => (float)x).ToArray(), - double[] doubleValues => values, - double[,] double2DValues when dtype == TF_DataType.TF_FLOAT => ConvertArray2D(double2DValues, Convert.ToSingle), - double[,] double2DValues => values, - _ => Convert.ChangeType(values, new_system_dtype), - }; + + } dtype = values.GetDataType(); } @@ -306,7 +336,7 @@ 
bool hasattr(Graph property, string attr) if (tensor is EagerTensor eagerTensor) { - if(tensor.dtype == tf.int64) + if (tensor.dtype == tf.int64) return new Shape(tensor.ToArray()); else return new Shape(tensor.ToArray()); @@ -481,7 +511,7 @@ bool hasattr(Graph property, string attr) var d_ = new int[value.size]; foreach (var (index, d) in enumerate(value.ToArray())) d_[index] = d >= 0 ? d : -1; - + ret = ret.merge_with(new Shape(d_)); } return ret; From 5142ad658cf9233abd2c9fe727c2daeea84a88f6 Mon Sep 17 00:00:00 2001 From: Aleksej Solomatin Date: Sun, 30 Jun 2024 22:06:12 +0300 Subject: [PATCH 178/182] test: Added an `evaluate` method call to a unit test for a multi-input model. --- test/TensorFlowNET.Keras.UnitTest/MultiInputModelTest.cs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/test/TensorFlowNET.Keras.UnitTest/MultiInputModelTest.cs b/test/TensorFlowNET.Keras.UnitTest/MultiInputModelTest.cs index dd8ef8f91..bb293bd90 100644 --- a/test/TensorFlowNET.Keras.UnitTest/MultiInputModelTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/MultiInputModelTest.cs @@ -54,6 +54,13 @@ public void LeNetModel() var x = new NDArray[] { x1, x2 }; model.fit(x, dataset.Train.Labels, batch_size: 8, epochs: 3); + x1 = x1["0:8"]; + x2 = x1; + + x = new NDArray[] { x1, x2 }; + var y = dataset.Train.Labels["0:8"]; + (model as Engine.Model).evaluate(x, y); + x1 = np.ones((1, 28, 28, 1), TF_DataType.TF_FLOAT); x2 = np.zeros((1, 28, 28, 1), TF_DataType.TF_FLOAT); var pred = model.predict((x1, x2)); From f8b7bdeb9b7fa10bf49b888934683f04febfc6e2 Mon Sep 17 00:00:00 2001 From: Aleksej Solomatin Date: Sun, 30 Jun 2024 22:43:01 +0300 Subject: [PATCH 179/182] test: Added a unit test of training a multi-input model using a dataset. --- .../MultiInputModelTest.cs | 75 +++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/test/TensorFlowNET.Keras.UnitTest/MultiInputModelTest.cs b/test/TensorFlowNET.Keras.UnitTest/MultiInputModelTest.cs index bb293bd90..54b76d41a 100644 --- a/test/TensorFlowNET.Keras.UnitTest/MultiInputModelTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/MultiInputModelTest.cs @@ -2,6 +2,7 @@ using System; using Tensorflow.Keras.Optimizers; using Tensorflow.NumPy; +using static Tensorflow.Binding; using static Tensorflow.KerasApi; namespace Tensorflow.Keras.UnitTest @@ -66,5 +67,79 @@ public void LeNetModel() var pred = model.predict((x1, x2)); Console.WriteLine(pred); } + + [TestMethod] + public void LeNetModelDataset() + { + var inputs = keras.Input((28, 28, 1)); + var conv1 = keras.layers.Conv2D(16, (3, 3), activation: "relu", padding: "same").Apply(inputs); + var pool1 = keras.layers.MaxPooling2D((2, 2), 2).Apply(conv1); + var conv2 = keras.layers.Conv2D(32, (3, 3), activation: "relu", padding: "same").Apply(pool1); + var pool2 = keras.layers.MaxPooling2D((2, 2), 2).Apply(conv2); + var flat1 = keras.layers.Flatten().Apply(pool2); + + var inputs_2 = keras.Input((28, 28, 1)); + var conv1_2 = keras.layers.Conv2D(16, (3, 3), activation: "relu", padding: "same").Apply(inputs_2); + var pool1_2 = keras.layers.MaxPooling2D((4, 4), 4).Apply(conv1_2); + var conv2_2 = keras.layers.Conv2D(32, (1, 1), activation: "relu", padding: "same").Apply(pool1_2); + var pool2_2 = keras.layers.MaxPooling2D((2, 2), 2).Apply(conv2_2); + var flat1_2 = keras.layers.Flatten().Apply(pool2_2); + + var concat = keras.layers.Concatenate().Apply((flat1, flat1_2)); + var dense1 = keras.layers.Dense(512, activation: "relu").Apply(concat); + var dense2 = keras.layers.Dense(128, activation: "relu").Apply(dense1); + 
var dense3 = keras.layers.Dense(10, activation: "relu").Apply(dense2); + var output = keras.layers.Softmax(-1).Apply(dense3); + + var model = keras.Model((inputs, inputs_2), output); + model.summary(); + + var data_loader = new MnistModelLoader(); + + var dataset = data_loader.LoadAsync(new ModelLoadSetting + { + TrainDir = "mnist", + OneHot = false, + ValidationSize = 59900, + }).Result; + + var loss = keras.losses.SparseCategoricalCrossentropy(); + var optimizer = new Adam(0.001f); + model.compile(optimizer, loss, new string[] { "accuracy" }); + + NDArray x1 = np.reshape(dataset.Train.Data, (dataset.Train.Data.shape[0], 28, 28, 1)); + + var multiInputDataset = tf.data.Dataset.zip( + tf.data.Dataset.from_tensor_slices(x1), + tf.data.Dataset.from_tensor_slices(x1), + tf.data.Dataset.from_tensor_slices(dataset.Train.Labels) + ).batch(8); + multiInputDataset.FirstInputTensorCount = 2; + + model.fit(multiInputDataset, epochs: 3); + + x1 = x1["0:8"]; + + multiInputDataset = tf.data.Dataset.zip( + tf.data.Dataset.from_tensor_slices(x1), + tf.data.Dataset.from_tensor_slices(x1), + tf.data.Dataset.from_tensor_slices(dataset.Train.Labels["0:8"]) + ).batch(8); + multiInputDataset.FirstInputTensorCount = 2; + + (model as Engine.Model).evaluate(multiInputDataset); + + x1 = np.ones((1, 28, 28, 1), TF_DataType.TF_FLOAT); + var x2 = np.zeros((1, 28, 28, 1), TF_DataType.TF_FLOAT); + + multiInputDataset = tf.data.Dataset.zip( + tf.data.Dataset.from_tensor_slices(x1), + tf.data.Dataset.from_tensor_slices(x2) + ).batch(8); + multiInputDataset.FirstInputTensorCount = 2; + + var pred = model.predict(multiInputDataset); + Console.WriteLine(pred); + } } } From 93dda17944b6e34380897ad3480ac2218fb7398e Mon Sep 17 00:00:00 2001 From: Aleksej Solomatin Date: Sun, 30 Jun 2024 22:44:03 +0300 Subject: [PATCH 180/182] fix: Added support for training a multi-input model using a dataset. 
--- src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs | 14 +++++++++++++- src/TensorFlowNET.Keras/Engine/Model.Fit.cs | 13 ++++++++++++- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs index b3264429e..ec99d7ef9 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs @@ -112,7 +112,19 @@ public Dictionary evaluate(IDatasetV2 x, int verbose = 1, bool is Steps = data_handler.Inferredsteps }); - return evaluate(data_handler, callbacks, is_val, test_function); + Func> testFunction; + + if (data_handler.DataAdapter.GetDataset().structure.Length > 2 || + data_handler.DataAdapter.GetDataset().FirstInputTensorCount > 1) + { + testFunction = test_step_multi_inputs_function; + } + else + { + testFunction = test_function; + } + + return evaluate(data_handler, callbacks, is_val, testFunction); } /// diff --git a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs index 13a1b63bc..e1303513e 100644 --- a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs +++ b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs @@ -179,9 +179,20 @@ public ICallback fit(IDatasetV2 dataset, StepsPerExecution = _steps_per_execution }); + Func> trainStepFunction; + + if (data_handler.DataAdapter.GetDataset().structure.Length > 2 || + data_handler.DataAdapter.GetDataset().FirstInputTensorCount > 1) + { + trainStepFunction = train_step_multi_inputs_function; + } + else + { + trainStepFunction = train_step_function; + } return FitInternal(data_handler, epochs, validation_step, verbose, callbacks, validation_data: validation_data, - train_step_func: train_step_function); + train_step_func: trainStepFunction); } History FitInternal(DataHandler data_handler, int epochs, int validation_step, int verbose, List callbackList, IDatasetV2 validation_data, From b6c5d26fab9a5eab72c0c81c554fec8412d86771 Mon Sep 17 00:00:00 2001 From: Leonardo Doherty <73901464+eLDoherty@users.noreply.github.com> Date: Mon, 13 Jan 2025 23:29:04 -0500 Subject: [PATCH 181/182] fix: Resolve fixed-size array issue Replace .ToArray() with .ToList() to allow dynamic modification of network_nodes in MapGraphNetwork() Replaced .ToArray() with .ToList() to resolve the issue where .Add() was called on a fixed-size array. 
This prevents the "Collection was of a fixed size" error when calling something like this:
var model = keras.Model(new Tensors(new Tensor[] { encoder_inputs, decoder_inputs }), outputs: decoder_dense);
---
 src/TensorFlowNET.Keras/Engine/Functional.cs | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/TensorFlowNET.Keras/Engine/Functional.cs b/src/TensorFlowNET.Keras/Engine/Functional.cs
index 7347585f8..75854d82c 100644
--- a/src/TensorFlowNET.Keras/Engine/Functional.cs
+++ b/src/TensorFlowNET.Keras/Engine/Functional.cs
@@ -180,7 +180,7 @@ void ComputeTensorUsageCount()
             var (nodes_in_decreasing_depth, layer_indices) = BuildMap(outputs);
             var network_nodes = nodes_in_decreasing_depth
                 .Select(node => MakeNodeKey(node.Layer.Name, node.Layer.InboundNodes.IndexOf(node)))
-                .ToArray();
+                .ToList();
 
             var nodes_depths = new Dictionary<INode, int>();
             var layers_depths = new Dictionary<ILayer, int>();
@@ -221,7 +221,7 @@ void ComputeTensorUsageCount()
                     layers_depths[input_layer] = 0;
                     layer_indices[input_layer] = -1;
                     nodes_depths[input_layer.InboundNodes[0]] = 0;
-                    network_nodes.add(MakeNodeKey(input_layer.Name, 0));
+                    network_nodes.Add(MakeNodeKey(input_layer.Name, 0));
                 }
             }
 
@@ -231,7 +231,7 @@ void ComputeTensorUsageCount()
             {
                 if (!nodes_by_depth.ContainsKey(depth))
                     nodes_by_depth[depth] = new List<INode>();
-                nodes_by_depth[depth].append(node);
+                nodes_by_depth[depth].Add(node);
             }
 
             var layers_by_depth = new Dictionary<int, List<ILayer>>();
@@ -239,7 +239,7 @@ void ComputeTensorUsageCount()
             {
                 if (!layers_by_depth.ContainsKey(depth))
                     layers_by_depth[depth] = new List<ILayer>();
-                layers_by_depth[depth].append(layer);
+                layers_by_depth[depth].Add(layer);
             }
 
             // Get sorted list of layer depths.
@@ -260,7 +260,7 @@ void ComputeTensorUsageCount()
 
             // Get sorted list of node depths.
             depth_keys = nodes_by_depth.Keys.OrderBy(x => x).Reverse();
-            return (network_nodes, nodes_by_depth, layers, layers_by_depth);
+            return (network_nodes.ToArray(), nodes_by_depth, layers, layers_by_depth);
         }
 
         string MakeNodeKey(string layer_name, int node_index)

From 6ce6066551ce80202119a121a05b006aadd9ef37 Mon Sep 17 00:00:00 2001
From: Haiping
Date: Wed, 22 Jan 2025 09:46:45 -0600
Subject: [PATCH 182/182] Update release.yml

---
 .github/workflows/release.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 8f862e329..02601764c 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -53,7 +53,7 @@ jobs:
         }
 
       - name: Upload packages artifacts
-        uses: actions/upload-artifact@v1.0.0
+        uses: actions/upload-artifact@v4.0.0
         with:
          name: "drop-ci-packages"
          path: './packages'
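
Taken together, PATCH 178 through PATCH 181 round out multi-input support across predict, evaluate, fit with zipped datasets, and functional model construction. The sketch below pieces that surface into one minimal C# example, modeled on the unit tests in the patches above; the layer sizes, tensor shapes, batch sizes, and variable names are illustrative assumptions, not code taken verbatim from any single patch.

using Tensorflow;
using Tensorflow.Keras.Optimizers;
using Tensorflow.NumPy;
using static Tensorflow.Binding;
using static Tensorflow.KerasApi;

// Two image inputs merged by Concatenate, the functional pattern used in the
// multi-input unit tests.
var input_a = keras.Input((28, 28, 1));
var input_b = keras.Input((28, 28, 1));
var flat_a = keras.layers.Flatten().Apply(
    keras.layers.Conv2D(16, (3, 3), activation: "relu", padding: "same").Apply(input_a));
var flat_b = keras.layers.Flatten().Apply(
    keras.layers.Conv2D(16, (3, 3), activation: "relu", padding: "same").Apply(input_b));
var concat = keras.layers.Concatenate().Apply((flat_a, flat_b));
var output = keras.layers.Softmax(-1).Apply(
    keras.layers.Dense(10, activation: "linear").Apply(concat));

// Multiple inputs can be passed as a tuple (as in the unit tests) or as a
// Tensors built from a Tensor[] (the case fixed by PATCH 181).
var model = keras.Model((input_a, input_b), output);
model.compile(new Adam(0.001f),
    keras.losses.SparseCategoricalCrossentropy(),
    new string[] { "accuracy" });

// Illustrative stand-in data instead of a real dataset.
var x1 = np.ones((8, 28, 28, 1), TF_DataType.TF_FLOAT);
var x2 = np.zeros((8, 28, 28, 1), TF_DataType.TF_FLOAT);

// Direct NDArray path (PATCH 178): predict on a tuple of arrays.
var pred_direct = model.predict((x1, x2));

// Dataset path (PATCH 179/180): zip the inputs and mark how many leading
// tensors belong to the model inputs via FirstInputTensorCount.
var zipped = tf.data.Dataset.zip(
    tf.data.Dataset.from_tensor_slices(x1),
    tf.data.Dataset.from_tensor_slices(x2)).batch(4);
zipped.FirstInputTensorCount = 2;
var pred_dataset = model.predict(zipped);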