perf: call analysis scripts from python to reduce the number of reading trace data #161

Merged 5 commits on Feb 24, 2024
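In effect, the new report/report_analysis/analyze_all.py reads the LTTng trace once and passes the resulting in-memory objects to every analysis step, whereas the previous make_report.sh flow launched each analysis script as a separate process that re-read the same trace. A minimal sketch of that pattern, using the caret_analyze classes that appear in this diff (the trace path and the loop bodies are illustrative only; in the PR the actual steps are analyze_path.analyze() and analyze_node.analyze()):

from caret_analyze import Application, Architecture, Lttng

def analyze_once(trace_dir: str, architecture_yaml: str) -> None:
    # Read the LTTng trace a single time.
    lttng = Lttng(trace_dir)
    arch = Architecture('yaml', architecture_yaml)
    app = Application(arch, lttng)
    # Each analysis step reuses the same loaded objects instead of
    # spawning a new process that reloads the trace.
    for path in app.paths:
        print(path.path_name)
    for node in app.nodes:
        print(node.node_name)

if __name__ == '__main__':
    analyze_once('/path/to/trace_data', 'architecture_path.yaml')

The use_python flag added to make_report.sh below switches between this single-process flow and the original per-script flow.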
8 changes: 4 additions & 4 deletions report/analyze_path/add_path_to_architecture.py
@@ -183,10 +183,10 @@ def add_path_to_architecture(args, arch: Architecture):
_logger.info('<<< Add Path: Start >>>')
# Read target path information from JSON
try:
with open(args.target_path_json[0], encoding='UTF-8') as f_json:
with open(args.target_path_json, encoding='UTF-8') as f_json:
target_path_json = json.load(f_json)
except:
_logger.error(f'Unable to read {args.target_path_json[0]}')
_logger.error(f'Unable to read {args.target_path_json}')
sys.exit(-1)
comm_filter, node_filter = create_search_paths_filter(
target_path_json['ignore_topic_list'] if 'ignore_topic_list' in target_path_json else None,
@@ -260,7 +260,7 @@ def parse_arg():
parser = argparse.ArgumentParser(
description='Script to add path information to architecture file')
parser.add_argument('trace_data', nargs=1, type=str)
parser.add_argument('target_path_json', nargs=1, type=str)
parser.add_argument('--target_path_json', type=str, default='target_path.json')
parser.add_argument('--architecture_file_path', type=str, default='architecture_path.yaml')
parser.add_argument('--use_latest_message', action='store_true', default=True)
parser.add_argument('--max_node_depth', type=int, default=15)
@@ -277,7 +277,7 @@ def main():
_logger = create_logger(__name__, logging.DEBUG if args.verbose else logging.INFO)

_logger.debug(f'trace_data: {args.trace_data[0]}')
_logger.debug(f'target_path_json: {args.target_path_json[0]}')
_logger.debug(f'target_path_json: {args.target_path_json}')
_logger.debug(f'architecture_file_path: {args.architecture_file_path}')
_logger.debug(f'use_latest_message: {args.use_latest_message}')
_logger.debug(f'max_node_depth: {args.max_node_depth}')
2 changes: 1 addition & 1 deletion report/find_valid_duration/find_valid_duration.sh
@@ -9,5 +9,5 @@ script_path=$(dirname "$0")/..
trace_data_name=$(basename "${trace_data}")

# Path analysis
python3 "${script_path}"/analyze_path/add_path_to_architecture.py "${trace_data}" "${target_path_json}" --architecture_file_path=architecture_path.yaml --max_node_depth="${max_node_depth}" --timeout="${timeout}" -v
python3 "${script_path}"/analyze_path/add_path_to_architecture.py "${trace_data}" --target_path_json="${target_path_json}" --architecture_file_path=architecture_path.yaml --max_node_depth="${max_node_depth}" --timeout="${timeout}" -v
python3 "${script_path}"/find_valid_duration/find_valid_duration.py "${trace_data}" --architecture_file_path=architecture_path.yaml -v
119 changes: 119 additions & 0 deletions report/report_analysis/analyze_all.py
@@ -0,0 +1,119 @@
# Copyright 2022 Tier IV, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to make analysis reports
"""
from __future__ import annotations
import sys
import os
from pathlib import Path
import argparse
from distutils.util import strtobool
import logging
from caret_analyze import Architecture, Application, Lttng
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..')
from common.utils import create_logger, read_trace_data
from analyze_node import analyze_node
from analyze_path import add_path_to_architecture, analyze_path
from find_valid_duration import find_valid_duration



def parse_arg():
"""Parse arguments"""
parser = argparse.ArgumentParser(
description='Script to make analysis reports')
parser.add_argument('trace_data', nargs=1, type=str)
parser.add_argument('dest_dir', nargs=1, type=str)
parser.add_argument('--component_list_json', type=str, default='')
parser.add_argument('--start_strip', type=float, default=0.0,
help='Start strip [sec] to load trace data')
parser.add_argument('--end_strip', type=float, default=0.0,
help='End strip [sec] to load trace data')
parser.add_argument('--sim_time', type=strtobool, default=False)
parser.add_argument('-f', '--force', action='store_true', default=False,
help='Overwrite report directory')
parser.add_argument('-v', '--verbose', action='store_true', default=False)

# options for add_path
parser.add_argument('--target_path_json', type=str, default='target_path.json')
parser.add_argument('--architecture_file_path', type=str, default='architecture_path.yaml')
parser.add_argument('--use_latest_message', action='store_true', default=True)
parser.add_argument('--max_node_depth', type=int, default=15)
parser.add_argument('--timeout', type=int, default=120)

# options for path analysis
parser.add_argument('-m', '--message_flow', type=strtobool, default=False,
help='Output message flow graph')

# options for find_valid_duration
parser.add_argument('--find_valid_duration', type=strtobool, default=False)
parser.add_argument('--duration', type=float, default=1200.0,
help='Duration [sec] to load trace data')
parser.add_argument('--skip_first_num', type=int, default=1,
help='The number to skip the first n-th trace data')

args = parser.parse_args()
return args


def main():
"""Main function"""
args = parse_arg()
logger = create_logger(__name__, logging.DEBUG if args.verbose else logging.INFO)
args.trace_data = args.trace_data[0]
logger.debug(f'trace_data: {args.trace_data}')
args.dest_dir = args.dest_dir[0]
logger.debug(f'dest_dir: {args.dest_dir}')
logger.debug(f'component_list_json: {args.component_list_json}')
logger.debug(f'start_strip: {args.start_strip}, end_strip: {args.end_strip}')
logger.debug(f'sim_time: {args.sim_time}')
logger.debug(f'target_path_json: {args.target_path_json}')
logger.debug(f'architecture_file_path: {args.architecture_file_path}')
logger.debug(f'use_latest_message: {args.use_latest_message}')
logger.debug(f'max_node_depth: {args.max_node_depth}')
logger.debug(f'timeout: {args.timeout}')
args.message_flow = True if args.message_flow == 1 else False
logger.debug(f'message_flow: {args.message_flow}')
logger.debug(f'find_valid_duration: {args.find_valid_duration}')
logger.debug(f'duration: {args.duration}')
logger.debug(f'skip_first_num: {args.skip_first_num}')

# Read trace data
lttng = read_trace_data(args.trace_data, args.start_strip, args.end_strip, False)
arch = Architecture('lttng', args.trace_data)

# Create architecture for path analysis
add_path_to_architecture.add_path_to_architecture(args, arch)
arch_path = Architecture('yaml', args.architecture_file_path)
app = Application(arch_path, lttng)

# Find duration to be analyzed
# Run path analysis and find start point(sec) where the topic runs in the paths
if args.find_valid_duration:
start_strip, end_strip = find_valid_duration.analyze(args, lttng, arch_path, app)
args.start_strip = start_strip
args.end_strip = end_strip
logger.info(f'Find valid duration. start_strip: {args.start_strip}, end_strip: {args.end_strip}')
logger.info(f'Reload trace data')
lttng = read_trace_data(args.trace_data, args.start_strip, args.end_strip, False)
app = Application(arch_path, lttng)

# Analyze
analyze_path.analyze(args, lttng, arch_path, app, args.dest_dir + '/analyze_path')
analyze_node.analyze(args, lttng, arch, app, args.dest_dir + '/analyze_node')


if __name__ == '__main__':
main()
report/report_analysis/make_html_analysis.py
@@ -21,7 +21,7 @@
from pathlib import Path
import yaml
import flask
from markupsafe import Markup

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..')
from common.utils import ComponentManager
from common.utils import read_note_text
@@ -53,12 +53,14 @@

def get_component_list(report_dir: str) -> list[str]:
"""Create component name list in node analysis"""
node_report_dir= report_dir + '/analyze_node'

component_list = []
for component_name, _ in ComponentManager().component_dict.items():
if os.path.isdir(os.path.join(node_report_dir, component_name)):
component_list.append(component_name)
yaml_file_path = os.path.join(report_dir, 'analyze_node', component_name, 'stats_node.yaml')
if os.path.isfile(yaml_file_path):
with open(yaml_file_path, 'r', encoding='utf-8') as f_yaml:
stats = yaml.safe_load(f_yaml)
if stats:
component_list.append(component_name)
return component_list


@@ -67,7 +69,8 @@
stats_dict = {}
component_list = get_component_list(report_dir)
for component_name in component_list:
with open(report_dir + '/analyze_node/' + component_name + '/stats_node.yaml', 'r', encoding='utf-8') as f_yaml:
yaml_file_path = os.path.join(report_dir, 'analyze_node', component_name, 'stats_node.yaml')
with open(yaml_file_path, 'r', encoding='utf-8') as f_yaml:
stats = yaml.safe_load(f_yaml)
stats_dict[component_name] = stats
return stats_dict
@@ -80,7 +83,7 @@
return stats


def find_latency_topk(component_name, stats_node, numk=20) -> None:

"""Find callback functions whose latency time is the longest(top5), and add this information into stats"""
callback_latency_list = []
for node_name, node_info in stats_node.items():
@@ -89,7 +92,7 @@
trigger = callback_info['subscribe_topic_name'] if callback_info['period_ns'] == -1 else f'{float(callback_info["period_ns"]) / 1e6} [ms]'
callback_latency_list.append({
'link': f'analyze_node/{component_name}/index{node_name.replace("/", "_")}.html',
'displayname': Markup(node_name + '<br>' + callback_info['callback_legend'] + ': ' + trigger),

'avg': callback_info['Latency']['avg'] if isinstance(callback_info['Latency']['avg'], (int, float)) else 0,
'min': callback_info['Latency']['min'] if isinstance(callback_info['Latency']['min'], (int, float)) else 0,
'max': callback_info['Latency']['max'] if isinstance(callback_info['Latency']['max'], (int, float)) else 0,
@@ -99,7 +102,7 @@
})

callback_latency_list = sorted(callback_latency_list, reverse=True, key=lambda x: x['p50'])
callback_latency_list = callback_latency_list[:numk]

stats_node['latency_topk'] = callback_latency_list


@@ -119,7 +122,7 @@
note_text_top, note_text_bottom = read_note_text(trace_data_dir, dest_dir, args.note_text_top, args.note_text_bottom)

destination_path = f'{dest_dir}/{index_filename}.html'
template_path = f'{Path(__file__).resolve().parent}/template_report_analysis.html'
template_path = f'{Path(__file__).resolve().parent}/template_html_analysis.html'
title = 'Analysis report'
sub_title = dest_dir.split('/')[-1]
render_page(title, sub_title, destination_path, template_path, component_list, stats_node_dict,
59 changes: 39 additions & 20 deletions report/report_analysis/make_report.sh
@@ -5,6 +5,7 @@
set -e

# Variable settings
use_python=true # to avoid reading trace data every step
script_path=$(dirname "$0")/..
trace_data_name=$(basename "${trace_data}")
report_dir_name=output/report_"${trace_data_name}"
@@ -18,25 +19,43 @@ fi
cp "${component_list_json}" "${report_dir_name}"/.
cp "${target_path_json}" "${report_dir_name}"/.

# Path analysis
python3 "${script_path}"/analyze_path/add_path_to_architecture.py "${trace_data}" "${target_path_json}" --architecture_file_path=architecture_path.yaml --max_node_depth="${max_node_depth}" --timeout="${timeout}" -v
python3 "${script_path}"/analyze_path/analyze_path.py "${trace_data}" "${report_dir_name}" --architecture_file_path=architecture_path.yaml --start_strip "${start_strip}" --end_strip "${end_strip}" --sim_time "${sim_time}" -f -v -m "${draw_all_message_flow}"
python3 "${script_path}"/analyze_path/make_report_analyze_path.py "${report_dir_name}"

# Track of response time
python3 "${script_path}"/track_path/make_report_track_path.py "${report_dir_name}" "${report_store_dir}" --relpath_from_report_store_dir="${relpath_from_report_store_dir}"

# Node analysis
python3 "${script_path}"/analyze_node/analyze_node.py "${trace_data}" "${report_dir_name}" --component_list_json="${component_list_json}" --start_strip "${start_strip}" --end_strip "${end_strip}" --sim_time "${sim_time}" -f -v
python3 "${script_path}"/analyze_node/make_report_analyze_node.py "${report_dir_name}"

# # Check callback health
# python3 "${script_path}"/check_callback_sub/check_callback_sub.py "${trace_data}" "${report_dir_name}" --component_list_json="${component_list_json}" --start_strip "${start_strip}" --end_strip "${end_strip}" -f -v
# python3 "${script_path}"/check_callback_sub/make_report_sub.py "${report_dir_name}"
# python3 "${script_path}"/check_callback_timer/check_callback_timer.py "${trace_data}" "${report_dir_name}" --component_list_json="${component_list_json}" --start_strip "${start_strip}" --end_strip "${end_strip}" -f -v
# python3 "${script_path}"/check_callback_timer/make_report_timer.py "${report_dir_name}"

# Make top page
python3 "${script_path}"/report_analysis/make_report_analysis.py "${trace_data}" "${report_dir_name}" --note_text_top "${note_text_top}" --note_text_bottom "${note_text_bottom}" --num_back 3
if ${use_python}; then
find_valid_duration=${find_valid_duration:-false}
duration=${duration:-1200}
# Analyze
python3 "${script_path}"/report_analysis/analyze_all.py "${trace_data}" "${report_dir_name}" \
--component_list_json="${component_list_json}" \
--start_strip "${start_strip}" \
--end_strip "${end_strip}" \
--sim_time "${sim_time}" \
--target_path_json="${target_path_json}" \
--architecture_file_path=architecture_path.yaml \
--max_node_depth="${max_node_depth}" \
--timeout="${timeout}" \
--find_valid_duration="${find_valid_duration}" \
--duration="${duration}" \
-f -v

# Make html pages
python3 "${script_path}"/analyze_node/make_report_analyze_node.py "${report_dir_name}"
python3 "${script_path}"/analyze_path/make_report_analyze_path.py "${report_dir_name}"
python3 "${script_path}"/track_path/make_report_track_path.py "${report_dir_name}" "${report_store_dir}" --relpath_from_report_store_dir="${relpath_from_report_store_dir}"
python3 "${script_path}"/report_analysis/make_html_analysis.py "${trace_data}" "${report_dir_name}" --note_text_top "${note_text_top}" --note_text_bottom "${note_text_bottom}" --num_back 3
else
# Path analysis
python3 "${script_path}"/analyze_path/add_path_to_architecture.py "${trace_data}" --target_path_json="${target_path_json}" --architecture_file_path=architecture_path.yaml --max_node_depth="${max_node_depth}" --timeout="${timeout}" -v
python3 "${script_path}"/analyze_path/analyze_path.py "${trace_data}" "${report_dir_name}" --architecture_file_path=architecture_path.yaml --start_strip "${start_strip}" --end_strip "${end_strip}" --sim_time "${sim_time}" -f -v -m "${draw_all_message_flow}"
python3 "${script_path}"/analyze_path/make_report_analyze_path.py "${report_dir_name}"

# Track of response time
python3 "${script_path}"/track_path/make_report_track_path.py "${report_dir_name}" "${report_store_dir}" --relpath_from_report_store_dir="${relpath_from_report_store_dir}"

# Node analysis
python3 "${script_path}"/analyze_node/analyze_node.py "${trace_data}" "${report_dir_name}" --component_list_json="${component_list_json}" --start_strip "${start_strip}" --end_strip "${end_strip}" --sim_time "${sim_time}" -f -v
python3 "${script_path}"/analyze_node/make_report_analyze_node.py "${report_dir_name}"

# Make top page
python3 "${script_path}"/report_analysis/make_html_analysis.py "${trace_data}" "${report_dir_name}" --note_text_top "${note_text_top}" --note_text_bottom "${note_text_bottom}" --num_back 3
fi

echo "<<< OK. All report pages are created >>>"
report/report_validation/make_html_validation.py
@@ -30,7 +30,7 @@


app = flask.Flask(__name__)
app.jinja_env.add_extension('jinja2.ext.loopcontrols')



def make_report(report_dir: str, component_list_json: str, note_text_top, note_text_bottom, num_back):
Expand Down Expand Up @@ -63,7 +63,7 @@
summary_topic_dict['cnt_failed'] += summary[Metrics.FREQUENCY.name]['cnt_failed']

destination_path = f'{report_dir}/index.html'
template_path = f'{Path(__file__).resolve().parent}/template_report_validation.html'
template_path = f'{Path(__file__).resolve().parent}/template_html_validation.html'
title = 'Validation report'
sub_title = report_dir.split('/')[-1]
with app.app_context():