@@ -42,8 +42,8 @@ absl::Status PopulateInputTextTensorForBERT(
     const CluRequest& request, int token_id_tensor_idx,
     int token_mask_tensor_idx, int token_type_id_tensor_idx,
     const tflite::support::text::tokenizer::BertTokenizer* tokenizer,
-    size_t max_seq_len, int max_history_turns, tflite::Interpreter* interpreter,
-    Artifacts* artifacts) {
+    size_t max_seq_len, int max_history_turns,
+    core::TfLiteEngine::Interpreter* interpreter, Artifacts* artifacts) {
   size_t seq_len;
   int64_t* tokens_tensor =
       interpreter->typed_input_tensor<int64_t>(token_id_tensor_idx);
@@ -116,8 +116,9 @@ absl::Status PopulateInputTextTensorForBERT(
   return absl::OkStatus();
 }
 
-absl::StatusOr<int> GetInputSeqDimSize(const size_t input_idx,
-                                       const tflite::Interpreter* interpreter) {
+absl::StatusOr<int> GetInputSeqDimSize(
+    const size_t input_idx,
+    const core::TfLiteEngine::Interpreter* interpreter) {
   if (input_idx >= interpreter->inputs().size()) {
     return absl::InternalError(absl::StrCat(
         "input_idx should be less than interpreter input numbers. ", input_idx,
@@ -132,14 +133,15 @@ absl::StatusOr<int> GetInputSeqDimSize(const size_t input_idx,
   return tflite::SizeOfDimension(tensor, 1);
 }
 
-absl::Status AbstractModule::Init(tflite::Interpreter* interpreter,
+absl::Status AbstractModule::Init(core::TfLiteEngine::Interpreter* interpreter,
                                   const BertCluAnnotatorOptions* options) {
   interpreter_ = interpreter;
   return absl::OkStatus();
 }
 
 absl::StatusOr<std::unique_ptr<AbstractModule>> UtteranceSeqModule::Create(
-    tflite::Interpreter* interpreter, const TensorIndexMap* tensor_index_map,
+    core::TfLiteEngine::Interpreter* interpreter,
+    const TensorIndexMap* tensor_index_map,
     const BertCluAnnotatorOptions* options,
     const tflite::support::text::tokenizer::BertTokenizer* tokenizer) {
   auto out = std::make_unique<UtteranceSeqModule>();
@@ -187,7 +189,8 @@ AbstractModule::NamesAndConfidencesFromOutput(int names_tensor_idx,
 }
 
 absl::StatusOr<std::unique_ptr<AbstractModule>> DomainModule::Create(
-    tflite::Interpreter* interpreter, const TensorIndexMap* tensor_index_map,
+    core::TfLiteEngine::Interpreter* interpreter,
+    const TensorIndexMap* tensor_index_map,
     const BertCluAnnotatorOptions* options) {
   auto out = std::make_unique<DomainModule>();
   out->tensor_index_map_ = tensor_index_map;
@@ -215,7 +218,8 @@ absl::Status DomainModule::Postprocess(Artifacts* artifacts,
 }
 
 absl::StatusOr<std::unique_ptr<AbstractModule>> IntentModule::Create(
-    tflite::Interpreter* interpreter, const TensorIndexMap* tensor_index_map,
+    core::TfLiteEngine::Interpreter* interpreter,
+    const TensorIndexMap* tensor_index_map,
     const BertCluAnnotatorOptions* options) {
   auto out = std::make_unique<IntentModule>();
   out->tensor_index_map_ = tensor_index_map;
@@ -261,7 +265,8 @@ absl::Status IntentModule::Postprocess(Artifacts* artifacts,
 }
 
 absl::StatusOr<std::unique_ptr<AbstractModule>> SlotModule::Create(
-    tflite::Interpreter* interpreter, const TensorIndexMap* tensor_index_map,
+    core::TfLiteEngine::Interpreter* interpreter,
+    const TensorIndexMap* tensor_index_map,
     const BertCluAnnotatorOptions* options) {
   auto out = std::make_unique<SlotModule>();
   out->tensor_index_map_ = tensor_index_map;
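
Below is a minimal usage sketch, not part of the diff above, showing how a caller might hand the engine-owned interpreter to one of these helpers. It assumes the tensorflow_lite_support header path and the tflite::task::text::clu namespace, and relies on core::TfLiteEngine::Interpreter exposing typed_input_tensor<>() as the hunks above imply; the function name WriteFirstTokenId is hypothetical.

// Hypothetical sketch: assumes the header path and namespace below, and that
// core::TfLiteEngine::Interpreter supports typed_input_tensor<>() as used in
// the hunks above.
#include <cstdint>

#include "tensorflow_lite_support/cc/task/core/tflite_engine.h"

namespace tflite::task::text::clu {

// Writes one token id into the given input tensor through the engine-owned
// interpreter pointer, mirroring how the refactored helpers receive theirs.
inline void WriteFirstTokenId(core::TfLiteEngine::Interpreter* interpreter,
                              int token_id_tensor_idx, int64_t token_id) {
  int64_t* tokens_tensor =
      interpreter->typed_input_tensor<int64_t>(token_id_tensor_idx);
  tokens_tensor[0] = token_id;
}

}  // namespace tflite::task::text::clu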