Support for OuteTTS 1.0 by edwko · Pull Request #12794 · ggml-org/llama.cpp · GitHub

Support for OuteTTS 1.0 #12794

Draft

edwko wants to merge 2 commits into master

Changes from 1 commit

Revert tts.cpp
edwko committed Apr 7, 2025
commit 38126e9fd33265b1d0d46e5c913a3fa7139fc2a1
18 changes: 7 additions & 11 deletions examples/tts/tts.cpp
@@ -22,7 +22,6 @@ using json = nlohmann::ordered_json;
 enum outetts_version {
     OUTETTS_V0_2,
     OUTETTS_V0_3,
-    OUTETTS_V1_0,
 };
 
 //
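
The enum above is presumably what the example switches on when it builds version-specific prompts; this revert drops the OUTETTS_V1_0 value from tts.cpp again. For context, a minimal sketch of how a caller might map a speaker-profile version string onto the enum — parse_outetts_version, the version strings, and the fallback choice are assumptions for illustration, not code from this PR:

#include <string>

// Hypothetical helper (not in tts.cpp): choose an outetts_version from a
// speaker-profile version string. After this revert, "1.0" has no enum
// value in tts.cpp, so anything unrecognized falls back to the v0.2 format.
static outetts_version parse_outetts_version(const std::string & version) {
    if (version == "0.3") {
        return OUTETTS_V0_3;
    }
    return OUTETTS_V0_2; // oldest supported prompt format as the default
}
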
@@ -578,12 +577,7 @@ int main(int argc, char ** argv) {
 
     const llama_vocab * vocab = llama_model_get_vocab(model_ttc);
 
-    // TODO: refactor in a common struct
-    params.model = params.vocoder.model;
-    params.model_url = params.vocoder.model_url;
-    params.hf_repo = params.vocoder.hf_repo;
-    params.hf_file = params.vocoder.hf_file;
-
+    params.model = params.vocoder.model;
     params.embedding = true;
 
     common_init_result llama_init_cts = common_init_from_params(params);
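
The hunk above restores the base branch's single assignment: the same common_params object that loaded the TTC model is pointed at the vocoder and reused for a second common_init_from_params call, and params.model now appears to carry the whole model selection (path, URL, Hugging Face repo/file) as one struct, which is why the four removed lines collapse into one. A rough sketch of that two-stage load, simplified and not copied verbatim from main():

#include "common.h" // llama.cpp common library: common_params, common_init_from_params

// Sketch only: one common_params object drives two model loads in a row.
common_params params;                 // assumed to be parsed from the CLI earlier

// 1) load the text-to-codes (TTC) model selected by the user
common_init_result llama_init_ttc = common_init_from_params(params);

// 2) retarget the same params at the vocoder and load it in embedding mode
params.model     = params.vocoder.model; // one assignment covers path/URL/HF selection
params.embedding = true;

common_init_result llama_init_cts = common_init_from_params(params);
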
@@ -700,11 +694,13 @@ lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|14
         const std::string voice_data = audio_data;
 
         auto tmp = common_tokenize(vocab, voice_data, false, true);
-        std::ostringstream tokens_oss;
+        printf("\n\n");
+
         for (size_t i = 0; i < tmp.size(); ++i) {
-            tokens_oss << tmp[i] << ", ";
+            printf("%d, ", tmp[i]);
         }
-        LOG_INF("\n\n%s: llama tokens: %s\n\n", __func__, tokens_oss.str().c_str());
+        printf("\n\n");
+
         prompt_add(prompt_inp, tmp);
 #else
         prompt_add(prompt_inp, llama_tokens {
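
The pre-revert commit had gathered the token ids into a std::ostringstream and emitted them through a single LOG_INF call; the revert puts the original printf loop back. If one wanted the single-log-line behaviour without changing the loop in place, a small helper along these lines would do — tokens_to_str is a hypothetical name, not something that exists in tts.cpp:

#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical helper (not in tts.cpp): join token ids as "1, 2, 3" so the
// whole dump can go through one logging call instead of many printf calls.
static std::string tokens_to_str(const std::vector<int32_t> & tokens) {
    std::ostringstream oss;
    for (size_t i = 0; i < tokens.size(); ++i) {
        if (i > 0) {
            oss << ", ";
        }
        oss << tokens[i];
    }
    return oss.str();
}

With that in place, the dump above becomes a one-liner such as LOG_INF("%s: llama tokens: %s\n", __func__, tokens_to_str(tmp).c_str()); — essentially what the reverted commit did inline.
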
@@ -1091,4 +1087,4 @@ lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|14
     llama_backend_free();
 
     return retval;
-}
+}