Commit

…genai into guozhong/add_hook_sample_for_transformers_v4_43

wgzintel committed Sep 27, 2024
2 parents ee30ac6 + f053e5e commit 83e9f07
Showing 94 changed files with 7,539 additions and 1,886 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/causal_lm_cpp.yml
@@ -665,7 +665,7 @@ jobs:
output.write('question:\n')
chat_history.append(gen_prompt(prompt))
chat_prompt = tokenizer.apply_chat_template(chat_history, tokenize=False, add_generation_prompt=True)
-tokenized = tokenizer(chat_prompt, return_tensors='pt')
+tokenized = tokenizer(chat_prompt, return_tensors='pt', add_special_tokens=False)
answer = model.generate(**tokenized, max_length=1000, do_sample=False)
answer_str = tokenizer.decode(answer[0, tokenized['input_ids'].numel():], skip_special_tokens=True)
chat_history.append(gen_answer(answer_str))
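The tokenization change above matters because `apply_chat_template(..., tokenize=False)` already renders any special tokens the template defines; tokenizing that string again with the default `add_special_tokens=True` can prepend a second BOS token. A minimal sketch of the difference, assuming a Hugging Face chat model (the checkpoint name is a placeholder, not taken from the workflow):

```python
# Illustrative sketch only; "meta-llama/Llama-2-7b-chat-hf" is a placeholder checkpoint.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
chat_history = [{"role": "user", "content": "What is OpenVINO?"}]
chat_prompt = tokenizer.apply_chat_template(
    chat_history, tokenize=False, add_generation_prompt=True
)

# For templates that render BOS themselves (Llama-2-style), the default tokenization
# inserts a second BOS; add_special_tokens=False avoids the duplicate.
default_ids = tokenizer(chat_prompt, return_tensors='pt')['input_ids']
clean_ids = tokenizer(chat_prompt, return_tensors='pt', add_special_tokens=False)['input_ids']
print(default_ids.shape, clean_ids.shape)
```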
2 changes: 1 addition & 1 deletion .github/workflows/linux.yml
@@ -189,7 +189,7 @@ jobs:
if: |
always() &&
(needs.openvino_download.outputs.status == 'success' || needs.openvino_build.result == 'success')
-    timeout-minutes: 90
+    timeout-minutes: 120
defaults:
run:
shell: bash
4 changes: 2 additions & 2 deletions CMakeLists.txt
@@ -29,7 +29,7 @@ project(OpenVINOGenAI
VERSION 2024.5.0.0
DESCRIPTION "OpenVINO GenAI"
HOMEPAGE_URL "https://github.com/openvinotoolkit/openvino.genai"
-    LANGUAGES CXX)
+    LANGUAGES CXX C)

# Find OpenVINODeveloperPackage first to compile with SDL flags
find_package(OpenVINODeveloperPackage ${OpenVINOGenAI_VERSION} QUIET
@@ -75,7 +75,7 @@ set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY OFF)
set(CPACK_COMPONENTS_ALL core_genai core_genai_dev cpp_samples_genai licensing_genai openvino_tokenizers openvino_tokenizers_docs)
if(ENABLE_PYTHON)
list(APPEND CPACK_COMPONENTS_ALL pygenai_${Python3_VERSION_MAJOR}_${Python3_VERSION_MINOR})
-endif()
+endif()
if(WIN32 AND NOT DEFINED CPACK_GENERATOR)
set(CPACK_GENERATOR "ZIP")
endif()
3 changes: 1 addition & 2 deletions README.md
@@ -34,8 +34,7 @@ It includes the following pipelines:
6. [multinomial_causal_lm](./samples/cpp/multinomial_causal_lm/README.md)
7. [prompt_lookup_decoding_lm](./samples/cpp/prompt_lookup_decoding_lm/README.md)
8. [speculative_decoding_lm](./samples/cpp/speculative_decoding_lm/README.md)
-3. [Stable Diffuison (with LoRA) C++ image generation pipeline](./image_generation/stable_diffusion_1_5/cpp/README.md)
-4. [Latent Consistency Model (with LoRA) C++ image generation pipeline](./image_generation/lcm_dreamshaper_v7/cpp/README.md)
+3. [Stable Diffuison and Latent Consistency Model (with LoRA) C++ image generation pipeline](./samples/cpp/stable_diffusion/README.md)

### Requirements

115 changes: 113 additions & 2 deletions llm_bench/python/benchmark.py
@@ -319,6 +319,110 @@ def run_text_generation_genai(input_text, num, model, tokenizer, args, iter_data
llm_bench_utils.metrics_print.print_generated(num, warm_up=(num == 0), generated=generated_text[0])


def run_text_generation_genai_with_stream(input_text, num, model, tokenizer, args, iter_data_list, md5_list, prompt_index, streamer, model_precision, proc_id):
set_seed(args['seed'])
input_text_list = [input_text] * args['batch_size']
if args["output_dir"] is not None and num == 0:
for bs_index, in_text in enumerate(input_text_list):
llm_bench_utils.output_file.output_input_text(in_text, args, model_precision, prompt_index, bs_index, proc_id)
pt_inputs = tokenizer(input_text_list, return_tensors="pt")
input_token_size = pt_inputs.input_ids.shape[1]
pipe_tokenizer = model.get_tokenizer()
tok_encode_start = time.perf_counter()
input_data = pipe_tokenizer.encode(input_text_list)
tok_encode_end = time.perf_counter()
tok_encode_time = (tok_encode_end - tok_encode_start) * 1000
if args['batch_size'] > 1:
out_str = '[warm-up]' if num == 0 else '[{}]'.format(num)
out_str += " Batch_size={}, ".format(args['batch_size'])
out_str += 'all input token size after padding: {} * {}, '.format(input_token_size, args['batch_size'])
if args['infer_count'] is not None:
out_str += 'all max_output_token_size: {} * {}'.format(args['infer_count'], args['batch_size'])
log.info(out_str)
max_rss_mem_consumption = ''
max_uss_mem_consumption = ''
max_shared_mem_consumption = ''
if (args['mem_consumption'] == 1 and num == 0) or args['mem_consumption'] == 2:
mem_consumption.start_collect_memory_consumption()
max_gen_tokens = DEFAULT_OUTPUT_TOKEN_SIZE if args['infer_count'] is None else args['infer_count']
streamer.reset()
start = time.perf_counter()
generated_tokens = model.generate(input_data, max_new_tokens=max_gen_tokens, num_beams=args["num_beams"], streamer=streamer).tokens
end = time.perf_counter()
if (args['mem_consumption'] == 1 and num == 0) or args['mem_consumption'] == 2:
mem_consumption.end_collect_momory_consumption()
max_rss_mem_consumption, max_shared_mem_consumption, max_uss_mem_consumption = mem_consumption.get_max_memory_consumption()
mem_consumption.clear_max_memory_consumption()
generation_time = end - start
tok_decode_start = time.perf_counter()
generated_text = pipe_tokenizer.decode(generated_tokens)
tok_decode_end = time.perf_counter()
tok_decode_time = (tok_decode_end - tok_decode_start) * 1000
# Only text_gen need to minus length of input_data, because generated_text may include input_text
num_tokens = 0
result_md5_list = []
for bs_idx in range(args['batch_size']):
generated_text_len = len(generated_tokens[bs_idx])
num_tokens += generated_text_len
if generated_text_len > max_gen_tokens:
log.error('Output token size is over max output token size!')
result_text = generated_text[bs_idx]
if args["output_dir"] is not None:
llm_bench_utils.output_file.output_gen_text(result_text, args, model_precision, prompt_index, num, bs_idx, proc_id)
result_md5_list.append(hashlib.new("md5", result_text.encode(), usedforsecurity=False).hexdigest())
if len(md5_list[num]) == 0:
md5_list[num] = {prompt_index : result_md5_list}
else:
md5_list[num][prompt_index] = result_md5_list
per_token_time = generation_time * 1000 / (num_tokens / args['batch_size'])
tm_list = streamer.get_time_list()
log.debug('latency of all tokens:')
[log.debug('[{}]{:.4f}'.format(idx, tm)) for idx, tm in enumerate(tm_list)]
iter_data = gen_iterate_data(
num,
input_token_size * args['batch_size'],
len(tm_list),
num_tokens,
generation_time,
per_token_time,
result_md5_list,
max_rss_mem=max_rss_mem_consumption,
max_shared_mem=max_shared_mem_consumption,
max_uss_mem=max_uss_mem_consumption,
prompt_idx=prompt_index,
tokenization_time=(tok_encode_time, tok_decode_time)
)
iter_data_list.append(iter_data)
llm_bench_utils.metrics_print.print_metrics(
num,
iter_data,
tm_list,
[],
warm_up=(num == 0),
max_rss_mem=max_rss_mem_consumption,
max_shared_mem=max_shared_mem_consumption,
max_uss_mem=max_uss_mem_consumption,
tokenization_time=(tok_encode_time, tok_decode_time),
batch_size=args['batch_size']
)
if num > 0:
prev_md5 = md5_list[num - 1][prompt_index]
if result_md5_list != prev_md5:
log.warning(f"[{num}] Prompt[{prompt_index}]'s md5 {result_md5_list} "
f"is different from md5 of the {num - 1} iteration {prev_md5}")
llm_bench_utils.metrics_print.print_generated(num, warm_up=(num == 0), generated=generated_text[0])
if num == 1:
# if the device is CPU, throw exception
if args['devices'].lower().startswith('cpu') is True:
assert (result_md5_list == prev_md5)
else:
# throw exception
assert (result_md5_list == prev_md5)
else:
llm_bench_utils.metrics_print.print_generated(num, warm_up=(num == 0), generated=generated_text[0])
streamer.reset()
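
The streaming variant above assumes a hook object that is reset before each generation, passed to `model.generate(...)` as `streamer=`, and queried afterwards with `get_time_list()` for per-token latencies. The actual hook comes from `create_text_gen_model` (`bench_hook`); the sketch below only illustrates the interface this function relies on. The class itself is hypothetical: `put`/`end` come from the GenAI streamer interface, `reset`/`get_time_list` from the calls above.

```python
# Minimal timing-streamer sketch compatible with the calls made in
# run_text_generation_genai_with_stream(); not the hook llm_bench actually uses.
import time
import openvino_genai


class TimingStreamer(openvino_genai.StreamerBase):
    def __init__(self):
        super().__init__()
        self.time_list = []   # per-token latencies, in seconds
        self._last = None

    def reset(self):
        self.time_list = []
        self._last = time.perf_counter()

    def put(self, token_id) -> bool:
        # Called by the pipeline for every generated token.
        now = time.perf_counter()
        self.time_list.append(now - self._last)  # first entry ~= first-token latency
        self._last = now
        return False  # False keeps generation running; True would stop it

    def end(self):
        pass

    def get_time_list(self):
        return self.time_list
```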


def run_text_generation_benchmark(model_path, framework, device, args, num_iters):
model, tokenizer, pretrain_time, bench_hook, use_genai = FW_UTILS[framework].create_text_gen_model(model_path, device, **args)
model_precision = llm_bench_utils.model_utils.get_model_precision(model_path.parts)
@@ -341,7 +445,12 @@ def run_text_generation_benchmark(model_path, framework, device, args, num_iters
f"prompt idx: {prompt_idx_list}, num_beams: {args['num_beams']}")

# if num_iters == 0, just output warm-up data
-    text_gen_fn = run_text_generation if not use_genai else run_text_generation_genai
+    if not use_genai:
+        text_gen_fn = run_text_generation
+    elif bench_hook is not None:
+        text_gen_fn = run_text_generation_genai_with_stream
+    else:
+        text_gen_fn = run_text_generation_genai
proc_id = os.getpid()
if args['subsequent'] is False:
for num in range(num_iters + 1):
@@ -703,7 +812,9 @@ def get_argprser():
)
parser.add_argument('-od', '--output_dir', help='Save the input text and generated text, images to files')
llm_bench_utils.model_utils.add_stateful_model_arguments(parser)
-    parser.add_argument("--genai", action="store_true")
+    parser.add_argument("--genai", action="store_true", help="Use OpenVINO GenAI optimized pipelines for benchmarking")
+    parser.add_argument("--use_cb", action="store_true", help="Use Continuous Batching inference mode")
+    parser.add_argument("--cb_config", required=False, default=None, help="Path to file with Continuous Batching Scheduler settings")
parser.add_argument(
'--end_token_stopping',
action='store_true',
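`--genai` switches benchmarking to the OpenVINO GenAI pipelines, `--use_cb` enables Continuous Batching (only valid together with `--genai`, as enforced in `model_utils.py` below), and `--cb_config` points to a file with scheduler settings. A hedged sketch of what such a settings file might contain; the key names are assumptions modelled on `openvino_genai.SchedulerConfig`, not values taken from this commit:

```python
# Illustrative only: write a plausible Continuous Batching scheduler config for --cb_config.
# Field names are assumptions based on openvino_genai.SchedulerConfig; check the GenAI
# documentation for the options your version actually supports.
import json

cb_settings = {
    "cache_size": 2,                # KV-cache budget, in GB
    "max_num_batched_tokens": 256,  # upper bound on tokens scheduled per iteration
    "max_num_seqs": 32,             # upper bound on sequences processed concurrently
    "dynamic_split_fuse": True,     # allow long prompts to be split across steps
}

with open("scheduler_config.json", "w") as f:
    json.dump(cb_settings, f, indent=4)
```

A run would then combine the flags, for example `python benchmark.py -m <model_dir> -d CPU --genai --use_cb --cb_config scheduler_config.json` (assuming the benchmark's existing `-m`/`-d` model and device arguments).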
47 changes: 25 additions & 22 deletions llm_bench/python/llm_bench_utils/metrics_print.py
@@ -8,10 +8,6 @@ def print_metrics(
iter_num, iter_data, tms=None, tms_infer=None, warm_up=False, max_rss_mem=-1, max_shared_mem=-1,
max_uss_mem=-1, stable_diffusion=None, tokenization_time=None, batch_size=1
):
-    if tms is None:
-        tms = []
-    if tms_infer is None:
-        tms_infer = []
iter_str = str(iter_num)
if warm_up:
iter_str = 'warm-up'
@@ -36,25 +32,27 @@
if output_str != '':
output_str = ' '.join(['[{}]'.format(iter_str), output_str])
log.info(output_str)
-    if len(tms) > 0:
+    if tms is not None:
iter_data['first_token_latency'] = tms[0] * 1000 if len(tms) > 0 else -1
iter_data['other_tokens_avg_latency'] = sum(tms[1:]) / (len(tms) - 1) * 1000 if len(tms) > 1 else -1
+        first_token_latency = 'NA' if iter_data['first_token_latency'] == -1 else f"{iter_data['first_token_latency']:.2f} ms/{latency_unit}"
+        other_token_latency = 'NA' if iter_data['other_tokens_avg_latency'] == -1 else f"{iter_data['other_tokens_avg_latency']:.2f} ms/{latency_unit}"
log.info(
-            f"[{iter_str}] First token latency: {iter_data['first_token_latency']:.2f} ms/{latency_unit}, "
-            f"other tokens latency: {iter_data['other_tokens_avg_latency']:.2f} ms/{latency_unit}, len of tokens: {len(tms)} * {batch_size}",
+            f"[{iter_str}] First token latency: {first_token_latency}, "
+            f"other tokens latency: {other_token_latency}, len of tokens: {len(tms)} * {batch_size}",
)
-    else:
-        if tokenization_time:
+        if len(tms) == 0:
log.warning(f'[{iter_str}] No hook data output for first token latency and other tokens latency')
-    if len(tms_infer) > 0:
+    if tms_infer is not None:
iter_data['first_token_infer_latency'] = tms_infer[0] * 1000 if len(tms_infer) > 0 else -1
iter_data['other_tokens_infer_avg_latency'] = sum(tms_infer[1:]) / (len(tms_infer) - 1) * 1000 if len(tms_infer) > 1 else -1
+        first_infer_latency = 'NA' if iter_data['first_token_infer_latency'] == -1 else f"{iter_data['first_token_infer_latency']:.2f} ms/infer"
+        other_infer_latency = 'NA' if iter_data['other_tokens_infer_avg_latency'] == -1 else f"{iter_data['other_tokens_infer_avg_latency']:.2f} ms/infer"
log.info(
-            f"[{iter_str}] First infer latency: {iter_data['first_token_infer_latency']:.2f} ms/infer, "
-            f"other infers latency: {iter_data['other_tokens_infer_avg_latency']:.2f} ms/infer, inference count: {len(tms_infer)}",
+            f"[{iter_str}] First infer latency: {first_infer_latency}, "
+            f"other infers latency: {other_infer_latency}, inference count: {len(tms_infer)}",
)
-    else:
-        if tokenization_time:
+        if len(tms_infer) == 0:
log.warning(f'[{iter_str}] No hook data output for first infer latency and other infers latency')
if stable_diffusion is not None:
print_stable_diffusion_infer_latency(iter_str, iter_data, stable_diffusion)
@@ -112,8 +110,10 @@ def print_ldm_unet_vqvae_infer_latency(iter_num, iter_data, tms=None, warm_up=Fa
iter_data['first_token_infer_latency'] = iter_data['first_token_latency']
iter_data['other_tokens_infer_avg_latency'] = iter_data['other_tokens_avg_latency']

-    log.info(f"[{iter_str}] First step of unet latency: {iter_data['first_token_latency']:.2f} ms/step, "
-             f"other steps of unet latency: {iter_data['other_tokens_avg_latency']:.2f} ms/step",)
+    first_token_latency = 'NA' if iter_data['first_token_latency'] == -1 else f"{iter_data['first_token_latency']:.2f} ms/step"
+    other_token_latency = 'NA' if iter_data['other_tokens_avg_latency'] == -1 else f"{iter_data['other_tokens_avg_latency']:.2f} ms/step"
+    log.info(f"[{iter_str}] First step of unet latency: {first_token_latency}, "
+             f"other steps of unet latency: {other_token_latency}",)
if len_tms > 1:
log.info(f"[{iter_str}] Unet latency: {(sum(tms[0:(len_tms - 1)]) / (len_tms - 1)) * 1000:.2f} ms/step, "
f"vqvae decoder latency: {tms[len_tms - 1] * 1000:.2f} ms/step, "
@@ -149,14 +149,17 @@ def output_avg_statis_tokens(prompt_dict, prompt_idx_list, iter_data_list, batch
latency_unit = '{}tokens'.format(batch_size)
else:
latency_unit = '{}steps'.format(batch_size)
+    avg_1st_token_latency = 'NA' if avg_1st_token_latency < 0 else f'{avg_1st_token_latency:.2f} ms/{latency_unit}'
+    avg_2nd_tokens_latency = 'NA' if avg_2nd_tokens_latency < 0 else f'{avg_2nd_tokens_latency:.2f} ms/{latency_unit}'
+    avg_2nd_token_tput = 'NA' if avg_2nd_tokens_latency == 'NA' else f'{avg_2nd_token_tput:.2f} {latency_unit}s/s'
    if is_text_gen is True:
-        prompt_dict[p_idx] = '\n[ INFO ] [Average] Prompt[{}] Input token size: {}, 1st token lantency: {:.2f} ms/{}, ' \
-                             '2nd tokens latency: {:.2f} ms/{}, 2nd tokens throughput: {:.2f} tokens/s' \
-                             .format(p_idx, avg_input_size, avg_1st_token_latency, latency_unit, avg_2nd_tokens_latency, latency_unit, avg_2nd_token_tput)
+        prompt_dict[p_idx] = '\n[ INFO ] [Average] Prompt[{}] Input token size: {}, 1st token lantency: {}, ' \
+                             '2nd token lantency: {}, 2nd tokens throughput: {}' \
+                             .format(p_idx, avg_input_size, avg_1st_token_latency, avg_2nd_tokens_latency, avg_2nd_token_tput)
    else:
-        prompt_dict[p_idx] = '\n[ INFO ] [Average] Prompt[{}] 1st step of unet latency {:.2f} ms/{}, ' \
-                             '2nd steps of unet latency: {:.2f} ms/{}, 2nd steps throughput: {:.2f} steps/s' \
-                             .format(p_idx, avg_1st_token_latency, latency_unit, avg_2nd_tokens_latency, latency_unit, avg_2nd_token_tput)
+        prompt_dict[p_idx] = '\n[ INFO ] [Average] Prompt[{}] 1st step of unet latency: {}, ' \
+                             '2nd steps of unet latency: {}, 2nd steps throughput: {}' \
+                             .format(p_idx, avg_1st_token_latency, avg_2nd_tokens_latency, avg_2nd_token_tput)


def print_average(iter_data_list, prompt_idx_list, batch_size, is_text_gen=False):
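For orientation, the latency fields these changes guard with 'NA' come from the hook's per-token time list: the first entry yields the first-token latency and the remaining entries are averaged for the other-token latency, both reported in milliseconds, with -1 as the "no data" sentinel. A small worked example (the timings are invented):

```python
# Worked example of the latency math in print_metrics(); the values are made up.
tms = [0.850, 0.042, 0.045, 0.043]  # per-token times in seconds from the hook/streamer

first_token_latency = tms[0] * 1000 if len(tms) > 0 else -1                               # 850.00 ms
other_tokens_avg_latency = sum(tms[1:]) / (len(tms) - 1) * 1000 if len(tms) > 1 else -1   # ~43.33 ms

# -1 is the sentinel the updated code now prints as 'NA' instead of "-1.00 ms/token".
print(f"first: {first_token_latency:.2f} ms, others: {other_tokens_avg_latency:.2f} ms")
```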
8 changes: 8 additions & 0 deletions llm_bench/python/llm_bench_utils/model_utils.py
@@ -136,6 +136,7 @@ def analyze_args(args):
model_args['subsequent'] = args.subsequent
model_args['output_dir'] = args.output_dir
model_args['genai'] = args.genai
+    model_args["use_cb"] = args.use_cb
model_args['devices'] = args.device
model_args['prompt_index'] = [] if args.prompt_index is not None else None
if model_args['prompt_index'] is not None:
@@ -164,6 +165,13 @@
log.info(f"PT Config={model_args['config']}")
model_args['model_type'] = get_model_type(model_name, use_case, model_framework)
model_args['model_name'] = model_name

+    if args.use_cb and not args.genai:
+        raise RuntimeError("Continious batching mode supported only via OpenVINO GenAI")
+    cb_config = None
+    if args.cb_config:
+        cb_config = get_config(args.cb_config)
+    model_args["cb_config"] = cb_config
return model_path, model_framework, model_args, model_name


20 changes: 16 additions & 4 deletions llm_bench/python/llm_bench_utils/output_csv.py
@@ -106,10 +106,22 @@ def gen_data_to_csv(result, iter_data, pretrain_time):
result['output_size'] = iter_data['output_size']
result['latency(ms)'] = round(latency, 5) if latency != '' else latency
result['result_md5'] = iter_data['result_md5']
-    result['1st_latency(ms)'] = round(first_latency, 5) if first_latency != '' else first_latency
-    result['2nd_avg_latency(ms)'] = round(other_latency, 5) if other_latency != '' else other_latency
-    result['1st_infer_latency(ms)'] = round(first_token_infer_latency, 5) if first_token_infer_latency != '' else first_token_infer_latency
-    result['2nd_infer_avg_latency(ms)'] = round(other_token_infer_latency, 5) if other_token_infer_latency != '' else other_token_infer_latency
+    if first_latency < 0:
+        result['1st_latency(ms)'] = 'NA'
+    else:
+        result['1st_latency(ms)'] = round(first_latency, 5) if first_latency != '' else first_latency
+    if other_latency < 0:
+        result['2nd_avg_latency(ms)'] = 'NA'
+    else:
+        result['2nd_avg_latency(ms)'] = round(other_latency, 5) if other_latency != '' else other_latency
+    if first_token_infer_latency < 0:
+        result['1st_infer_latency(ms)'] = 'NA'
+    else:
+        result['1st_infer_latency(ms)'] = round(first_token_infer_latency, 5) if first_token_infer_latency != '' else first_token_infer_latency
+    if other_token_infer_latency < 0:
+        result['2nd_infer_avg_latency(ms)'] = 'NA'
+    else:
+        result['2nd_infer_avg_latency(ms)'] = round(other_token_infer_latency, 5) if other_token_infer_latency != '' else other_token_infer_latency
result['max_rss_mem(MB)'] = round(rss_mem, 5) if rss_mem != '' else rss_mem
result['max_uss_mem(MB)'] = round(uss_mem, 5) if uss_mem != '' else uss_mem
result['max_shared_mem(MB)'] = round(shared_mem, 5) if shared_mem != '' else shared_mem
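The CSV writer follows the same convention: a negative latency means the measurement was unavailable and is written as 'NA'; otherwise the value is rounded to five decimals, with empty strings passed through unchanged. The four added branches could equally be expressed with one helper; a refactoring sketch, not code from this commit:

```python
# Sketch of a helper equivalent to the four 'NA' branches above (not part of the commit).
def latency_cell(value):
    """Format a latency for the CSV: '' passes through, the negative sentinel becomes
    'NA', everything else is rounded to 5 decimal places."""
    if value == '':
        return value
    return 'NA' if value < 0 else round(value, 5)


print(latency_cell(-1), latency_cell(''), latency_cell(123.456789))
# -> NA  123.45679
```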