8000 Added how to use commit0 for sampling during STAR training by wenting-zhao · Pull Request #105 · commit-0/commit0 · GitHub
[go: up one dir, main page]

Skip to content

Added how to use commit0 for sampling during STaR training #105

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 18 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
fixed multiple epochs
  • Loading branch information
wenting-zhao committed Dec 14, 2024
commit 177a5a8e1795719663ef8ce77642bcd558f311b0
9 changes: 5 additions & 4 deletions examples/star/star.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
"""Main STaR Loop"""

from copy import deepcopy
from datasets import Dataset, DatasetDict, load_dataset
from inference import generate_predictions
from train import train
Expand All @@ -21,13 +22,13 @@ def main():
ds[split] = ds[split].add_column(name="text", column=texts)

model_name = args.model_name_or_path
ds["train"] = ds["train"].select(range(10))
output_dir = deepcopy(args.output_dir)
for i in range(args.iteration):
# sample
all_samples = generate_predictions(
model_name, ds["train"], args.temperature, args.n
)
ds["train"].add_column(name="sample", column=all_samples).to_json(f"{args.output_dir}/data/samples-iter{i}.json")
ds["train"].add_column(name="sample", column=all_samples).to_json(f"{output_dir}/data/samples-iter{i}.json")
assert len(ds["train"]) == len(all_samples)

# verify and construct the training set
Expand All @@ -43,10 +44,10 @@ def main():
passed_examples.append(example)
break
raw_datasets = DatasetDict({"train": Dataset.from_list(passed_examples), "validation": ds["validation"]})
raw_datasets["train"].to_json(f"{args.output_dir}/data/verified-samples-iter{i}.json")
raw_datasets["train"].to_json(f"{output_dir}/data/verified-samples-iter{i}.json")

# train
args.output_dir = f"{args.output_dir}/models-iter{i}"
args.output_dir = f"{output_dir}/models-iter{i}"
train(raw_datasets, model_name, args)
model_name = args.output_dir

Expand Down
6 changes: 0 additions & 6 deletions examples/star/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -254,10 +254,6 @@ def tokenize_function(examples):
# The trackers initializes automatically on the main process.
if args.with_tracking:
experiment_config = vars(args)
# TensorBoard cannot log Enums, need the raw value
experiment_config["lr_scheduler_type"] = experiment_config[
"lr_scheduler_type"
].value
accelerator.init_trackers("clm_no_trainer", experiment_config)

# Train!
Expand Down Expand Up @@ -407,8 +403,6 @@ def tokenize_function(examples):
with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
json.dump({"perplexity": perplexity}, f)
cleanup(model)
#cleanup(optimizer)
#cleanup(lr_scheduler)


if __name__ == "__main__":
Expand Down
Loading
0