From 9a6be836fb18106b1365a936ef1ab4224e107c08 Mon Sep 17 00:00:00 2001
From: h00467646
Date: Tue, 31 Oct 2023 09:17:21 +0800
Subject: [PATCH] [recommendation tools] mxRec atomic-operation test code and
 results
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 tools/atomic/gen_mt_data_0to1e.py       |  78 +++++
 tools/atomic/model_info.md              |  19 ++
 tools/atomic/sparse.sh                  |  60 ++++
 tools/atomic/sparse_lookup.py           | 266 +++++++++++++++++
 tools/atomic/sparse_lookup_with_grad.py | 277 ++++++++++++++++++
 tools/atomic/sparse_ops/__init__.py     |   7 +
 tools/atomic/sparse_ops/config.py       | 111 +++++++
 tools/atomic/sparse_ops/ops.py          | 133 +++++++++
 tools/atomic/sparse_ops/utils.py        |  23 ++
 ...3\346\236\234-tf1.15-rec0630-cann530.xlsx" | Bin 0 -> 32363 bytes
 10 files changed, 974 insertions(+)
 create mode 100644 tools/atomic/gen_mt_data_0to1e.py
 create mode 100644 tools/atomic/model_info.md
 create mode 100644 tools/atomic/sparse.sh
 create mode 100644 tools/atomic/sparse_lookup.py
 create mode 100644 tools/atomic/sparse_lookup_with_grad.py
 create mode 100644 tools/atomic/sparse_ops/__init__.py
 create mode 100644 tools/atomic/sparse_ops/config.py
 create mode 100644 tools/atomic/sparse_ops/ops.py
 create mode 100644 tools/atomic/sparse_ops/utils.py
 create mode 100644 "tools/atomic/\345\216\237\345\255\220\346\265\213\350\257\225\347\273\223\346\236\234-tf1.15-rec0630-cann530.xlsx"

diff --git a/tools/atomic/gen_mt_data_0to1e.py b/tools/atomic/gen_mt_data_0to1e.py
new file mode 100644
index 00000000..b9c89c65
--- /dev/null
+++ b/tools/atomic/gen_mt_data_0to1e.py
@@ -0,0 +1,78 @@
+import os
+import random
+import sys
+
+import numpy as np
+import tensorflow as tf
+
+np.random.seed(0)
+
+line_per_sample = 10000
+samples_num = 10000 * 800
+sparse_feat_list = ['feat_ids']
+sparse_feat_len = [100]
+
+# hot_zhanbi ("hot ratio"): fraction of key slots filled with the repeated hot
+# key, passed on the command line as an integer 0-10 (e.g. 5 -> 0.5)
+hot_zhanbi = float(sys.argv[1:][0]) / 10
+print(hot_zhanbi)
+
+tfpath = "/home/insert/data" + str(hot_zhanbi)
+if not os.path.exists(tfpath):
+    os.mkdir(tfpath)
+
+tfpath = "/home/insert/data" + str(hot_zhanbi) + "/tf"
+
+part1 = np.array(random.sample(range(0, 2), 1))
+
+
+def write_records(writer, line_cnt, file_cnt):
+    features = {
+        'label': tf.train.Feature(
+            float_list=tf.train.FloatList(value=np.random.randint(2, size=line_per_sample).tolist()))
+    }
+
+    count = 0
+    for i, sparse_feat in enumerate(sparse_feat_list):
+        np.random.seed(count)
+        print("===sparse=", sparse_feat)
+        part2 = np.array(random.sample(
+            range(100 * line_per_sample * (10 * file_cnt + line_cnt),
+                  100 * line_per_sample * (10 * file_cnt + line_cnt + 1)),
+            int(100 * line_per_sample * (1 - hot_zhanbi))))
+        features[sparse_feat] = tf.train.Feature(
+            int64_list=tf.train.Int64List(
+                value=part1.astype(np.int64).tolist() * int(100 * line_per_sample * hot_zhanbi)
+                      + part2.astype(np.int64).tolist())
+        )
+
+        count += 1
+    features = tf.train.Features(feature=features)
+    example = tf.train.Example(features=features)
+    writer.write(example.SerializeToString())
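+
+# Key mix per feature (a sketch of the logic above): `part1` contributes a single
+# "hot" key repeated for hot_zhanbi of the 100 * line_per_sample slots, while
+# `part2` draws the remaining keys without replacement from a window that
+# advances with (file_cnt, line_cnt), so cold keys never repeat across records.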
print(f">>>>>>>>>>>>count {sample_cnt} end.") + if sample_cnt == samples_num: + break + if line_cnt == line_per_file: + file_cnt += 1 + line_cnt = 0 + writer.close() + writer = tf.python_io.TFRecordWriter(f"{tfpath}_{file_cnt}.tfrecord") + writer.close() + + +if __name__ == '__main__': + gen_tfrecords(tfpath=tfpath) diff --git a/tools/atomic/model_info.md b/tools/atomic/model_info.md new file mode 100644 index 00000000..a14533cc --- /dev/null +++ b/tools/atomic/model_info.md @@ -0,0 +1,19 @@ + +### 业务领域/场景 +原子操作测试 + +### 模型框架 +TF1.15.0/TF2.6.5 + +### 使用方法 +#### 生成数据集 +Python3 gen_mt_data_0to1e.py 5 (这里5的含义为重复度50%) +默认生成在 /home/insert/ 路径下 + +#### 运行测试 +Sparse.sh 需要根据实际环境进行配置 +测试 sparse lookup +./sparse.sh 8(卡数) sparse_lookup.py 8(emb size) 5(重复度) 1 0 0 + + + diff --git a/tools/atomic/sparse.sh b/tools/atomic/sparse.sh new file mode 100644 index 00000000..56968da1 --- /dev/null +++ b/tools/atomic/sparse.sh @@ -0,0 +1,60 @@ +#!/bin/bash +local_rank_size=$1 +host=localhost +py=$2 +my_dim=$3 +chongfudu=$4 +all2all=$5 +pre=$6 +slp=$7 +rm -rf /root/atc_data/* +rm -rf /root/ascend/* +rm -rf kernel_meta_* + + +export ALL2ALL=$5 +export HOST_PIPELINE_OPS_LIB_PATH=/usr/local/python3.7.5/lib/python3.7/site-packages/mx_rec/libasc/libasc_ops.so +export EMPTY_TENSOR=1 +export ENABLE_RUNTIME_V2=0 +mpi_path=/usr/local/openmpi/bin/ +so_path=/usr/local/python3.7.5/lib/python3.7/site-packages/mx_rec/libasc/ +interface="enp61s0f0" +ulimit -c 0 +export ASCEND_GLOBAL_LOG_LEVEL=0 +export TF_CPP_MIN_LOG_LEVEL=3 +export ASCEND_INSTALL_PATH=/usr/local/Ascend/latest/ +export ASCEND_HOME_PATH=${ASCEND_INSTALL_PATH} +export ASCEND_LATEST_INSTALL_PATH=/usr/local/Ascend +#export ASCEND_HOME_PATH=${ASCEND_INSTALL_PATH}/ +CANN_BIN_PATH=${ASCEND_HOME_PATH}/bin:${ASCEND_HOME_PATH}/compiler/ccec_compiler/bin +CANN_PYTHONPATH=${ASCEND_HOME_PATH}/python/site-packages:${ASCEND_HOME_PATH}/opp/op_impl/built-in/ai_core/tbe #:${ASCEND_INSTALL_PATH}/tfplugin/latest/python/site-packages +PYTHON_BIN_PATH=/usr/local/python3.7.5/bin/ +export PATH=${mpi_path}/bin:${PYTHON_BIN_PATH}:${CANN_BIN_PATH}:$PATH +export PYTHONPATH=${PYTHONPATH}:/usr/local/Ascend/latest/python/site-packages:${so_path}:${CANN_PYTHONPATH} +export LD_PRELOAD=/lib64/libgomp.so.1 +CANN_LD_PATH=${ASCEND_HOME_PATH}/runtime/lib64:${ASCEND_HOME_PATH}/fwkacllib/lib64:${ASCEND_HOME_PATH}/lib64:${ASCEND_HOME_PATH}/lib64/plugin/opskernel:${ASCEND_HOME_PATH}/lib64/plugin/nnengine +export LD_LIBRARY_PATH=${so_path}:/usr/local/python3.7.5/lib/python3.7/site-packages/mx_rec/libasc/:/home/insert/src/platform/securec/lib/:${CANN_LD_PATH}:/home/opensource/opensource/hdf5/lib:/usr/local/lib:/usr/local/python3.7.5/lib:$LD_LIBRARY_PATH +export ASCEND_AICPU_PATH=${ASCEND_HOME_PATH} +export ASCEND_OPP_PATH=${ASCEND_HOME_PATH}/opp +export TOOLCHAIN_HOME=${ASCEND_HOME_PATH}/toolkit + +export BETTER_EXCEPTIONS=1 +mpi_args='-x BIND_INFO="0:48 48:48 96:48" -x SPDLOG_LEVEL=debug -bind-to none' +# rm logs +rm *txt >/dev/null +rm -rf /root/ascend/log/* + +# rm shm +for i in $(ipcs -m | tail -n +4 | awk {'print $2'}); do + ipcrm -m $i +done + +num_process=${local_rank_size} +host_string=${host//_/:${local_rank_size},node}:${local_rank_size} +echo run in $host_string + +interface="lo" + +#python3.7 -c "import tensorflow;print(tensorflow.__path__)" +horovodrun --network-interface ${interface} -np ${num_process} --mpi-args "${mpi_args}" --mpi -H localhost:${local_rank_size} \ + python3.7 ${py} --local_rank_size ${local_rank_size} --hccl_json hccl_json_${local_rank_size}p.json --my_dim ${my_dim} 
+# NOTE: the python scripts define --new_key (not --pre), so the 6th CLI
+# argument is forwarded as --new_key here.
+horovodrun --network-interface ${interface} -np ${num_process} --mpi-args "${mpi_args}" --mpi -H localhost:${local_rank_size} \
+    python3.7 ${py} --local_rank_size ${local_rank_size} --hccl_json hccl_json_${local_rank_size}p.json --my_dim ${my_dim} \
+    --chongfudu ${chongfudu} --new_key ${pre} --slp ${slp} | tee temp_${my_dim}_${chongfudu}_${ALL2ALL}_${pre}_${slp}.log
diff --git a/tools/atomic/sparse_lookup.py b/tools/atomic/sparse_lookup.py
new file mode 100644
index 00000000..570c683e
--- /dev/null
+++ b/tools/atomic/sparse_lookup.py
@@ -0,0 +1,266 @@
+import os
+import sys
+import time
+import argparse
+import numpy as np
+import tensorflow as tf
+from mpi4py import MPI  # must be imported before emb_cache and after SparseOps
+import psutil
+from sklearn.metrics import roc_auc_score
+
+from tensorflow.python.ops import math_ops
+from tensorflow.python.framework import ops
+from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
+from npu_bridge.hccl import hccl_ops
+from npu_bridge.estimator import npu_ops
+
+from mx_rec.graph.modifier import modify_graph_and_start_emb_cache
+from mx_rec.core.asc.manager import start_asc_pipeline
+from mx_rec.core.asc.helper import FeatureSpec, get_asc_insert_func
+from mx_rec.util.initialize import get_rank_size, init, clear_channel, get_rank_id, set_if_load, \
+    terminate_config_initializer
+from mx_rec.constants.constants import MxRecMode
+from mx_rec.core.embedding import create_table, sparse_lookup
+from mx_rec.util.initialize import get_ascend_global_hashtable_collection
+
+from sparse_ops.config import set_ascend_env
+
+USE_PIPELINE_TEST = False
+USE_STATIC = False
+USE_HOT = False
+USE_EXPANSION = False
+
+from mx_rec.constants.constants import ASCEND_SPARSE_LOOKUP_LOCAL_EMB, ASCEND_SPARSE_LOOKUP_ID_OFFSET
+
+
+class WideDeep:
+    def __init__(self, input_data, feature_spec_list, hashtable):
+        self.lbl_hldr = input_data["global_labels"][0]
+        self.input_data = input_data
+        self.feature_spec_list = feature_spec_list
+        self.hash_table_list = hashtable
+        self.forward()
+
+    def forward(self):
+        # rank_size is a module-level global, set in __main__ before the model is built
+        for feature, hash_table in zip(self.feature_spec_list, self.hash_table_list):
+            self.embedding = sparse_lookup(hash_table, feature, 1024 * 1024 // rank_size, dim=None, is_train=True,
+                                           name="merged_embedding_lookup", modify_graph=False, batch=self.input_data)
+
+        # with tf.control_dependencies([self.embedding]):
+        self.op = self.embedding[0][0]
+        return self.op
+
+
+def input_fn_tfrecord(feature_spec_list, rank_id, local_rank_id, rank_size, data_path, file_pattern, total_batch_size,
+                      num_epochs=1, perform_shuffle=False, training=True):
+    line_per_sample = 1024 * 8
+    total_batch_size = int(total_batch_size / line_per_sample)
+    num_parallel = 8
+
+    def extract_fn(data_record):
+        features = {
+            'label': tf.FixedLenFeature(shape=(line_per_sample,), dtype=tf.float32),
+            'feat_ids': tf.FixedLenFeature(shape=(128 * line_per_sample,), dtype=tf.int64)
+        }
+        sample = tf.parse_single_example(data_record, features)
+        return sample
+
+    def reshape_fn(batch):
+        batch['label'] = tf.reshape(batch['label'], [-1, ])
+        batch['feat_ids'] = tf.reshape(batch['feat_ids'], [-1, 128])
+        return batch
+
+    all_files = os.listdir(data_path)
+    files = [os.path.join(data_path, f) for f in all_files if f.startswith(file_pattern)]
+    dataset = tf.data.TFRecordDataset(files, num_parallel_reads=num_parallel)
+    batch_size = total_batch_size // rank_size
+    dataset = dataset.shard(rank_size, rank_id)
+    dataset = dataset.repeat(num_epochs)
+    dataset = dataset.map(extract_fn, num_parallel_calls=num_parallel).batch(batch_size,
+                                                                             drop_remainder=True)
+    dataset = dataset.map(reshape_fn, num_parallel_calls=num_parallel)
+    insert_fn = get_asc_insert_func(tgt_key_specs=feature_spec_list, is_training=True, dump_graph=False)
+    dataset = 
dataset.map(insert_fn) + + dataset = dataset.prefetch(int(100)) + return dataset + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='base') + parser.add_argument('--local_rank_size') + parser.add_argument('--hosts') + parser.add_argument('--hccl_json') + parser.add_argument('--my_dim') + parser.add_argument('--chongfudu') + parser.add_argument('--new_key') + parser.add_argument('--slp') + args = parser.parse_args() + local_rank_size = int(args.local_rank_size) + comm = MPI.COMM_WORLD + rank_id = comm.Get_rank() + rank_size = comm.Get_size() + print(f"rank {rank_id}/{rank_size}") + local_rank_id = rank_id % local_rank_size + set_ascend_env(rank_id, rank_size, local_rank_size, host=args.hosts, file=args.hccl_json) + + # create session + sess_config = tf.ConfigProto() + custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add() + custom_op.parameter_map["use_off_line"].b = True + custom_op.parameter_map["mix_compile_mode"].b = True + custom_op.name = "NpuOptimizer" + custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes('must_keep_origin_dtype') + sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF + custom_op.parameter_map["enable_data_pre_proc"].b = True + sess_config.gpu_options.allow_growth = True + custom_op.parameter_map["hcom_parallel"].b = False + custom_op.parameter_map["HCCL_algorithm"].s = tf.compat.as_bytes("level0:fullmesh;level1:pairwise") + + custom_op.parameter_map["iterations_per_loop"].i = 10 + # custom_op.parameter_map["enable_dump"].b = True + # custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes("./dump") + # custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes("11|12") + # custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all") + # custom_op.parameter_map["op_debug_level"].i = 0 + custom_op.parameter_map["op_wait_timeout"].i = 500 + custom_op.parameter_map["op_execute_timeout"].i = 500 + custom_op.parameter_map["op_precision_mode"].s = tf.compat.as_bytes("op_impl_mode.ini") + custom_op.parameter_map["graph_memory_max_size"].s = tf.compat.as_bytes(str(30000000000)) + custom_op.parameter_map["variable_memory_max_size"].s = tf.compat.as_bytes(str(30000000000)) + # custom_op.parameter_map["profiling_mode"].b = True + # custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes( + # '{"output":"/home","training_trace":"on","task_trace":"on","fp_point":"","bp_point":"","aicpu":"on","aic_metrics":"PipeUtilization"}') + + global_start_time = time.time() + tf.set_random_seed(10086) + np.random.seed(10086) + + my_dim = int(args.my_dim) + print("my_dim=", my_dim) + + hot_zhanbi = args.chongfudu + hot_zhanbi = float(hot_zhanbi) / 10 + + # if hot_zhanbi == 0: + # hot_zhanbi = int(hot_zhanbi) + + config = { + "data_path": "./data1/data" + str(hot_zhanbi) + "_" + str(float(args.new_key)) + "/", + "train_file_pattern": "tf", + "test_file_pattern": "test", + "batch_size": 1024 * 8, + "field_num": 128, + "send_count": 1024 * 1024 // rank_size, # 65536 * 10 > 39(field num) * 16000(bz) + "id_emb_dim": my_dim, + "ext_emb_vec_size": my_dim, + "train_epoch": 1, + "dev_vocab_size": 100000001 + } + + # model run parameter + print_steps = 300 + evaluate_stride = 80000 # eval every 200 steps + eval_steps = -1 # 8 ranks 34 + stop_steps = 95 + # Hybrid step1.1: init cache + emb_name = "wide_deep_emb" + + dev_vocab_size = config["dev_vocab_size"] # 23120 + host_vocab_size = 0 + + init(True, rank_id=rank_id, rank_size=local_rank_size, train_interval=100, eval_steps=-1, + 
prefetch_batch_number=1, use_dynamic=0, use_hot=1, use_dynamic_expansion=0) + + tf.disable_eager_execution() + ###################################### + feature_spec_list = [ + FeatureSpec("feat_ids", feat_count=128, table_name="merged_sparse_embeddings", batch_size=config["batch_size"])] + with tf.device('/cpu:0'): + train_dataset = input_fn_tfrecord(feature_spec_list=feature_spec_list, + rank_id=rank_id, + local_rank_id=local_rank_id, + rank_size=rank_size, + data_path=config["data_path"], + file_pattern=config["train_file_pattern"], + total_batch_size=int(rank_size * config["batch_size"]), + perform_shuffle=(not USE_PIPELINE_TEST), + num_epochs=config["train_epoch"]) + train_iterator = train_dataset.make_initializable_iterator() + train_next_iter = train_iterator.get_next() + + train_input_data = {"global_labels": train_next_iter["label"], + "feat_ids": train_next_iter["feat_ids"], + } + + sparse_hashtable = create_table(key_dtype=tf.int64, + dim=tf.TensorShape([my_dim]), + name="merged_sparse_embeddings", + emb_initializer=tf.variance_scaling_initializer(mode="fan_avg", + distribution='normal', seed=0), + device_vocabulary_size=dev_vocab_size * local_rank_size, + mode=MxRecMode.mapping("ASC")) + + model = WideDeep(train_input_data, feature_spec_list, [sparse_hashtable]) + MODIFY_GRAPH_FLAG = False + if MODIFY_GRAPH_FLAG: + modify_graph_and_start_emb_cache(dump_graph=False) + else: + start_asc_pipeline() + + with tf.Session(config=sess_config) as sess: + sess.run(tf.global_variables_initializer()) + sess.run([train_iterator.initializer]) + # build model + print("start build wdl(single domain) model") + print("=========start============") + # start run loop + total_start_time = time.time() + current_steps = 0 + train_finished = False + time.sleep(int(args.slp)) + while not train_finished: + try: + current_steps += 1 + print("current step =", current_steps) + # + run_dict = { + "adam": model.op, + "lbl_hldr": model.lbl_hldr, + } + if current_steps == 1: + total_start_time = time.time() + start_time = time.time() + print("start sess run") + results = sess.run(fetches=run_dict) + print("start sess run 1") + end_time = time.time() + print(f"current_steps: {current_steps} ,step time:{(end_time - start_time) * 1000}") + if current_steps <= 5: + total_start_time = time.time() + if current_steps % print_steps == 0: + print("----------" * 10) + try: + print( + f"current_steps: {current_steps} ,deep_loss:{results['deep_loss']}," + f"e2etime per step:{(end_time - start_time) * 1000}") + except KeyError: + print(f"current_steps: {current_steps}") + print("----------" * 10) + + if current_steps >= stop_steps: + train_finished = True + # + except tf.errors.OutOfRangeError: + train_finished = True + + # train_finished + # emb_cache.destroy() + # MPI.Finalize() + print( + f"training {current_steps} steps, consume time: {(time.time() - total_start_time) / (current_steps - 5) * 1000} ") + + terminate_config_initializer() + # emb_cache.destroy() + # MPI.Finalize() diff --git a/tools/atomic/sparse_lookup_with_grad.py b/tools/atomic/sparse_lookup_with_grad.py new file mode 100644 index 00000000..3d7d37e5 --- /dev/null +++ b/tools/atomic/sparse_lookup_with_grad.py @@ -0,0 +1,277 @@ +import os +import sys +import time +import argparse +import numpy as np +import tensorflow as tf +from mpi4py import MPI # must before emb_cache after SparseOps +import psutil +import sys +from sklearn.metrics import roc_auc_score + +from tensorflow.python.ops import math_ops +from tensorflow.python.framework import ops +from 
tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig +from npu_bridge.hccl import hccl_ops +from npu_bridge.estimator import npu_ops + +from mx_rec.graph.modifier import modify_graph_and_start_emb_cache +from mx_rec.core.asc.manager import start_asc_pipeline +from mx_rec.core.asc.helper import FeatureSpec, get_asc_insert_func +from mx_rec.util.initialize import get_rank_size, init, clear_channel, get_rank_id, set_if_load, \ + terminate_config_initializer +from mx_rec.constants.constants import MxRecMode +from mx_rec.core.embedding import create_table, sparse_lookup +from mx_rec.util.initialize import get_ascend_global_hashtable_collection +from mx_rec.optimizers.lazy_adam import CustomizedLazyAdam +from sparse_ops.config import set_ascend_env + +USE_PIPELINE_TEST = False +USE_STATIC = False +USE_HOT = False +USE_EXPANSION = False + + +def create_hash_optimizer(): + return CustomizedLazyAdam() + + +def get_sparse_optimizer(): + sparse_optimizer = create_hash_optimizer() + return sparse_optimizer + + +class WideDeep: + def __init__(self, input_data, feature_spec_list, hashtable): + self.lbl_hldr = input_data["global_labels"][0] + self.input_data = input_data + self.feature_spec_list = feature_spec_list + self.hash_table_list = hashtable + self.forward() + + def forward(self): + for feature, hash_table in zip(self.feature_spec_list, self.hash_table_list): + self.embedding = sparse_lookup(hash_table, feature, 1024 * 1024 // rank_size, dim=None, is_train=True, + name="merged_embedding_lookup", modify_graph=False, batch=self.input_data) + self.loss = tf.reduce_mean(self.embedding, axis=0) + with tf.control_dependencies([self.loss]): + self.op = tf.no_op() + return self.op + + +def input_fn_tfrecord(feature_spec_list, rank_id, local_rank_id, rank_size, data_path, file_pattern, total_batch_size, + num_epochs=1, perform_shuffle=False, training=True): + line_per_sample = 1024 * 8 + total_batch_size = int(total_batch_size / line_per_sample) + num_parallel = 8 + + def extract_fn(data_record): + features = { + 'label': tf.FixedLenFeature(shape=(line_per_sample,), dtype=tf.float32), + 'feat_ids': tf.FixedLenFeature(shape=(128 * line_per_sample,), dtype=tf.int64) + } + sample = tf.parse_single_example(data_record, features) + return sample + + def reshape_fn(batch): + batch['label'] = tf.reshape(batch['label'], [-1, ]) + batch['feat_ids'] = tf.reshape(batch['feat_ids'], [-1, 128]) + return batch + + all_files = os.listdir(data_path) + files = [os.path.join(data_path, f) for f in all_files if f.startswith(file_pattern)] + dataset = tf.data.TFRecordDataset(files, num_parallel_reads=num_parallel) + batch_size = total_batch_size // rank_size + dataset = dataset.shard(rank_size, rank_id) + dataset = dataset.repeat(num_epochs) + dataset = dataset.map(extract_fn, num_parallel_calls=num_parallel).batch(batch_size, + drop_remainder=True) + dataset = dataset.map(reshape_fn, num_parallel_calls=num_parallel) + insert_fn = get_asc_insert_func(tgt_key_specs=feature_spec_list, is_training=True, dump_graph=False) + dataset = dataset.map(insert_fn) + dataset = dataset.prefetch(int(100)) + return dataset + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='base') + parser.add_argument('--local_rank_size') + parser.add_argument('--hosts') + parser.add_argument('--hccl_json') + parser.add_argument('--my_dim') + parser.add_argument('--chongfudu') + parser.add_argument('--new_key') + parser.add_argument('--slp') + args = parser.parse_args() + local_rank_size = int(args.local_rank_size) 
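+    # Rank layout (sketch): MPI supplies the global rank/size below; the NPU on
+    # this host is local_rank_id = rank_id % local_rank_size, which must line up
+    # with the rank-table entries produced by sparse_ops.config.gen_config().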
+ comm = MPI.COMM_WORLD + rank_id = comm.Get_rank() + rank_size = comm.Get_size() + print(f"rank {rank_id}/{rank_size}") + local_rank_id = rank_id % local_rank_size + set_ascend_env(rank_id, rank_size, local_rank_size, host=args.hosts, file=args.hccl_json) + + # create session + sess_config = tf.ConfigProto() + custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add() + custom_op.parameter_map["use_off_line"].b = True + custom_op.parameter_map["mix_compile_mode"].b = True + custom_op.name = "NpuOptimizer" + custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes('must_keep_origin_dtype') + sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF + custom_op.parameter_map["enable_data_pre_proc"].b = True + sess_config.gpu_options.allow_growth = True + custom_op.parameter_map["hcom_parallel"].b = False + custom_op.parameter_map["HCCL_algorithm"].s = tf.compat.as_bytes("level0:fullmesh;level1:pairwise") + + custom_op.parameter_map["iterations_per_loop"].i = 5 + custom_op.parameter_map["enable_dump"].b = True + custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes("./dump") + custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes("1|2") + custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all") + custom_op.parameter_map["op_wait_timeout"].i = 500 + custom_op.parameter_map["op_execute_timeout"].i = 500 + custom_op.parameter_map["op_precision_mode"].s = tf.compat.as_bytes("op_impl_mode.ini") + custom_op.parameter_map["graph_memory_max_size"].s = tf.compat.as_bytes(str(30000000000)) + custom_op.parameter_map["variable_memory_max_size"].s = tf.compat.as_bytes(str(30000000000)) + + global_start_time = time.time() + tf.set_random_seed(10086) + np.random.seed(10086) + + my_dim = int(args.my_dim) + print("my_dim=", my_dim) + + hot_zhanbi = args.chongfudu + hot_zhanbi = float(hot_zhanbi) / 10 + + # if hot_zhanbi == 0: + # hot_zhanbi = int(hot_zhanbi) + + config = { + "data_path": "./data1/data" + str(hot_zhanbi) + "_" + str(float(args.new_key)) + "/", + "train_file_pattern": "tf", + "test_file_pattern": "test", + "batch_size": 1024 * 8, + "field_num": 128, + "send_count": 1024 * 1024 // rank_size, # 65536 * 10 > 39(field num) * 16000(bz) + "id_emb_dim": my_dim, + "ext_emb_vec_size": my_dim, + "train_epoch": 1, + "dev_vocab_size": 5000001 + } + + # model run parameter + print_steps = 300 + evaluate_stride = 80000 # eval every 200 steps + eval_steps = -1 # 8 ranks 34 + stop_steps = 5 + # Hybrid step1.1: init cache + emb_name = "wide_deep_emb" + + dev_vocab_size = config["dev_vocab_size"] # 23120 + host_vocab_size = 0 + + init(True, rank_id=rank_id, rank_size=local_rank_size, train_interval=100, eval_steps=-1, + prefetch_batch_number=1, use_dynamic=0, use_hot=1, use_dynamic_expansion=0) + + tf.disable_eager_execution() + ###################################### + feature_spec_list = [ + FeatureSpec("feat_ids", feat_count=128, table_name="merged_sparse_embeddings", batch_size=config["batch_size"])] + with tf.device('/cpu:0'): + train_dataset = input_fn_tfrecord(feature_spec_list=feature_spec_list, + rank_id=rank_id, + local_rank_id=local_rank_id, + rank_size=rank_size, + data_path=config["data_path"], + file_pattern=config["train_file_pattern"], + total_batch_size=int(rank_size * config["batch_size"]), + perform_shuffle=(not USE_PIPELINE_TEST), + num_epochs=config["train_epoch"]) + train_iterator = train_dataset.make_initializable_iterator() + train_next_iter = train_iterator.get_next() + + train_input_data = {"global_labels": 
train_next_iter["label"], + "feat_ids": train_next_iter["feat_ids"], + } + + sparse_optimizer_list = get_sparse_optimizer() + + sparse_hashtable = create_table(key_dtype=tf.int64, + dim=tf.TensorShape([my_dim]), + name="merged_sparse_embeddings", + emb_initializer=tf.variance_scaling_initializer(mode="fan_avg", + distribution='normal', seed=0), + device_vocabulary_size=dev_vocab_size * local_rank_size, + optimizer_list=sparse_optimizer_list, + mode=MxRecMode.mapping("ASC")) + + sparse_variables = tf.compat.v1.get_collection(get_ascend_global_hashtable_collection()) + model = WideDeep(train_input_data, feature_spec_list, [sparse_hashtable]) + + train_ops = [] + for loss, sparse_optimizer in zip([model.loss], [sparse_optimizer_list]): + sparse_grads = tf.gradients(loss, sparse_variables) + grads_and_vars = [(grad, variable) for grad, variable in zip(sparse_grads, sparse_variables)] + train_ops.append(sparse_optimizer.apply_gradients(grads_and_vars)) + + MODIFY_GRAPH_FLAG = False + if MODIFY_GRAPH_FLAG: + modify_graph_and_start_emb_cache(dump_graph=False) + else: + start_asc_pipeline() + + with tf.Session(config=sess_config) as sess: + sess.run(tf.global_variables_initializer()) + sess.run([train_iterator.initializer]) + # build model + print("start build wdl(single domain) model") + print("=========start============") + # start run loop + total_start_time = time.time() + current_steps = 0 + train_finished = False + time.sleep(int(args.slp)) + while not train_finished: + try: + current_steps += 1 + print("current step =", current_steps) + # + run_dict = { + "loss": model.op, + "adam": train_ops, + "lbl_hldr": model.lbl_hldr, + } + if current_steps == 1: + total_start_time = time.time() + start_time = time.time() + print("start sess run") + results = sess.run(fetches=run_dict) + print("start sess run 1") + end_time = time.time() + print(f"current_steps: {current_steps} ,step time:{(end_time - start_time) * 1000}") + if current_steps <= 5: + total_start_time = time.time() + if current_steps % print_steps == 0: + print("----------" * 10) + try: + print( + f"current_steps: {current_steps} ,deep_loss:{results['deep_loss']}," + f"e2etime per step:{(end_time - start_time) * 1000}") + except KeyError: + print(f"current_steps: {current_steps}") + print("----------" * 10) + + if current_steps >= stop_steps: + train_finished = True + + except tf.errors.OutOfRangeError: + train_finished = True + + # train_finished + print( + f"training {current_steps} steps, consume time: {(time.time() - total_start_time) / (current_steps - 5) * 1000} ") + + terminate_config_initializer() + MPI.Finalize() \ No newline at end of file diff --git a/tools/atomic/sparse_ops/__init__.py b/tools/atomic/sparse_ops/__init__.py new file mode 100644 index 00000000..53640a7e --- /dev/null +++ b/tools/atomic/sparse_ops/__init__.py @@ -0,0 +1,7 @@ +""" +init +""" +from __future__ import absolute_import +from sparse_ops.config import get_path + +__all__ = ["get_path", ] diff --git a/tools/atomic/sparse_ops/config.py b/tools/atomic/sparse_ops/config.py new file mode 100644 index 00000000..f10d12fd --- /dev/null +++ b/tools/atomic/sparse_ops/config.py @@ -0,0 +1,111 @@ +""" +配置文件 +""" +from __future__ import absolute_import +import os +import json +import psutil + + +def get_path(): + """ + 打印当前行号 + """ + return os.path.dirname(__file__) + + +def gen_config(server_str, local_rank_size, path=None): + """ + 生成hccl配置 + """ + + def _device(local_rank_id, rank_id, server_id): + return { + "device_id": f"{local_rank_id}", + "device_ip": 
+def gen_config(server_str, local_rank_size, path=None):
+    """
+    Generate the hccl rank-table configuration.
+    """
+
+    def _device(local_rank_id, rank_id, server_id):
+        return {
+            "device_id": f"{local_rank_id}",
+            "device_ip": f'192.{local_rank_id % 4}.{server_id}.{1 + local_rank_id // 4}',
+            "rank_id": f"{rank_id}"
+        }
+
+    def _server(server_id):
+        return {
+            "device": [],
+            "server_id": f"90.91.141.{server_id}"
+        }
+
+    conf = {
+        "server_count": "-1",
+        "server_list": [],
+        "status": "completed",
+        "version": "1.0"
+    }
+    rank_id = 0
+    servers = str(server_str).split('_')
+    conf['server_count'] = str(len(servers))
+    for server in servers:
+        srv = _server(server)
+        for local_rank_id in range(local_rank_size):
+            dev = _device(local_rank_id, rank_id, server)
+            rank_id = rank_id + 1
+            srv["device"].append(dev)
+        conf['server_list'].append(srv)
+
+    conf_str = json.dumps(conf)
+    if path is None:
+        path = '/tmp/hccl.json'
+    with open(path, 'w') as file_handle:
+        file_handle.write(conf_str)
+
+
+def set_ascend_env(rank, rank_size, local_rank_size, host, file=None, dev_id=-1, dev_index=-1):
+    """
+    Set Ascend-related parameters and environment variables and generate the hccl config.
+    """
+    rank = str(rank)
+    rank_size = str(rank_size)
+    local_rank_size = int(local_rank_size)
+    host = str(host)
+
+    os.environ["MOX_USE_NPU"] = "1"
+    os.environ["FUSION_TENSOR_SIZE"] = "2000000000"
+    os.environ["MOX_USE_TF_ESTIMATOR"] = "0"
+    os.environ["MOX_USE_TDT"] = "1"
+    os.environ["HEARTBEAT"] = "1"
+    os.environ["CONITNUE_TRAIN"] = "true"
+
+    os.environ["RANK_ID"] = rank
+    local_rank_id = int(rank) % int(local_rank_size)
+    if dev_id != -1:
+        os.environ["DEVICE_ID"] = str(dev_id)
+        os.environ["ASCEND_DEVICE_ID"] = str(dev_id)
+    else:
+        os.environ["DEVICE_ID"] = str(local_rank_id)
+        os.environ["ASCEND_DEVICE_ID"] = str(local_rank_id)
+    if dev_index != -1:
+        os.environ["DEVICE_INDEX"] = str(dev_index)
+    else:
+        os.environ["DEVICE_INDEX"] = str(local_rank_id)
+
+    os.environ["RANK_SIZE"] = rank_size
+    if file:
+        os.environ["RANK_TABLE_FILE"] = file
+    else:
+        gen_config(host, local_rank_size)
+        os.environ["RANK_TABLE_FILE"] = "/tmp/hccl.json"
+    os.environ["HCCL_CONNECT_TIMEOUT"] = "600"
+
+    os.environ["JOB_ID"] = "10086"
+    os.environ["SOC_VERSION"] = "Ascend910"
+    os.environ["GE_AICPU_FLAG"] = "1"
+    os.environ["NEW_GE_FE_ID"] = "1"
+    os.environ["EXPERIMENTAL_DYNAMIC_PARTITION"] = "1"
+    os.environ["ENABLE_FORCE_V2_CONTROL"] = "1"
+
+
+def bind_cpu():
+    p = psutil.Process()
+    try:
+        bind_start = 48
+        bind_count = 96
+        p.cpu_affinity([bind_start + x for x in range(bind_count)])
+    except IndexError:
+        print("error cpu bind info, skipped.")
diff --git a/tools/atomic/sparse_ops/ops.py b/tools/atomic/sparse_ops/ops.py
new file mode 100644
index 00000000..35fe2462
--- /dev/null
+++ b/tools/atomic/sparse_ops/ops.py
@@ -0,0 +1,133 @@
+"""
+Sparse ops.
+"""
+from __future__ import absolute_import
+import tensorflow as tf
+from npu_bridge.hccl import hccl_ops
+from sparse_ops import utils
+from mpi4py import MPI
+
+MPI.Init_thread(MPI.THREAD_MULTIPLE)  # must run before emb_cache is created
+utils.init = True
+
+
+class SparseOps:
+    """
+    Embedding-related communication helpers.
+    """
+
+    def __init__(self, fallback=False):
+        # context
+        self.fallback = fallback
+        self.all2all = hccl_ops.all_to_all_v
+
+    def get_a2a_args(self, lookup_vec_size, mini_bs_w_field, rank_size, send_count, emb_vec_size):
+        """
+        Build the all-to-all count/displacement arguments.
+        """
+        if self.fallback:
+            send_count = tf.cond(lookup_vec_size > send_count * rank_size,
+                                 lambda: mini_bs_w_field // rank_size,
+                                 lambda: send_count)
+        all2all_args = {
+            "sc": tf.cast([send_count * emb_vec_size] * rank_size, tf.int64),
+            "ss": tf.cast([send_count * emb_vec_size * i for i in range(rank_size)], tf.int64)}
+        all2all_args['rc'] = all2all_args['sc']
+        all2all_args['rs'] = all2all_args['ss']
+        return all2all_args, send_count * rank_size
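+
+    # Count layout sketch (symmetric case, rank_size=2, send_count=3, emb_vec_size=4):
+    #   sc = [12, 12]  elements sent to each peer rank
+    #   ss = [0, 12]   send displacements into the flattened buffer
+    # rc/rs simply mirror sc/ss because every rank exchanges equal-sized blocks.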
+    def forward_alltoall(self, all2all_args, restore_vec, hot_pos, emb_vec, emb_vec_size):
+        """
+        Forward all-to-all exchange of embedding vectors.
+        all2all_args: counts/displacements from get_a2a_args
+        restore_vec: gather indices restoring the original row order
+        emb_vec: input embeddings
+        """
+        emb_vec = tf.reshape(emb_vec, [-1])
+
+        result = self.all2all(send_data=emb_vec,
+                              send_counts=all2all_args['sc'],
+                              send_displacements=all2all_args['ss'],
+                              recv_counts=all2all_args['rc'],
+                              recv_displacements=all2all_args['rs']
+                              )
+
+        result = tf.reshape(result,
+                            [-1, emb_vec_size],
+                            name="after_all2all_reshape")
+        if hot_pos is not None:
+            result = tf.concat([tf.gather(result, hot_pos, name="hot_pos"), result], axis=0)
+
+        output = tf.gather(result, restore_vec)
+        return output
+
+    def forward_alltoallc(self, all2all_args, restore_vec, emb_vec, emb_vec_size, rank):
+        """
+        Forward all-to-all-v-c exchange of embedding vectors.
+        all2all_args: send-count matrix
+        restore_vec: gather indices restoring the original row order
+        emb_vec: input embeddings
+        """
+        emb_vec = tf.reshape(emb_vec, [-1])
+
+        result = hccl_ops.all_to_all_v_c(send_data=emb_vec,
+                                         send_count_matrix=all2all_args,
+                                         rank=rank
+                                         )
+
+        result = tf.reshape(result,
+                            [-1, emb_vec_size],
+                            name="after_all2all_reshape")
+        output = tf.gather(result, restore_vec)
+        return output
+
+    def backward_alltoall(self, emb_grad, hot_pos, segment_ids, num_segments, all2all_args):
+        """
+        Backward all-to-all exchange of embedding gradients.
+        emb_grad: raw gradients
+        segment_ids: restore vector (segment index per gradient row)
+        num_segments: length after deduplication
+        """
+        # unique_local_grad 2node shape 37755 same with rc total and num_segment
+        # unique_local_grad shape is [40052, 80]
+        if hot_pos is not None:
+            unique_local_grad = tf.math.unsorted_segment_sum(emb_grad,
+                                                             segment_ids=segment_ids,
+                                                             num_segments=num_segments + tf.shape(hot_pos)[0],
+                                                             name="backward_combine")
+            hot, cold = tf.split(unique_local_grad,
+                                 [tf.shape(hot_pos)[0], tf.shape(unique_local_grad)[0] - tf.shape(hot_pos)[0]], axis=0)
+            unique_local_grad = tf.tensor_scatter_nd_update(cold, tf.expand_dims(hot_pos, 1), hot)
+        else:
+            unique_local_grad = tf.math.unsorted_segment_sum(emb_grad,
+                                                             segment_ids=segment_ids,
+                                                             num_segments=num_segments, name="backward_combine")
+
+        unique_grad = self.all2all(send_data=unique_local_grad,
+                                   send_counts=all2all_args['rc'],
+                                   send_displacements=all2all_args['rs'],
+                                   recv_counts=all2all_args['sc'],
+                                   recv_displacements=all2all_args['ss']
+                                   )
+        return unique_grad
+
+    def backward_alltoallc(self, emb_grad, segment_ids, num_segments, all2all_args, rank):
+        """
+        Backward all-to-all-v-c exchange of embedding gradients.
+        emb_grad: raw gradients
+        segment_ids: restore vector (segment index per gradient row)
+        num_segments: length after deduplication
+        """
+        unique_local_grad = tf.math.unsorted_segment_sum(emb_grad,
+                                                         segment_ids=segment_ids,
+                                                         num_segments=num_segments, name="backward_combine")
+        # unique_local_grad 2node shape 37755 same with rc total and num_segment
+        # unique_local_grad shape is [40052, 80]
+        unique_local_grad = tf.reshape(unique_local_grad, [-1])
+
+        all2all_args = tf.transpose(all2all_args)
+        unique_grad = hccl_ops.all_to_all_v_c(send_data=unique_local_grad,
+                                              send_count_matrix=all2all_args,
+                                              rank=rank
+                                              )
+        return unique_grad
diff --git a/tools/atomic/sparse_ops/utils.py b/tools/atomic/sparse_ops/utils.py
new file mode 100644
index 00000000..07cf796d
--- /dev/null
+++ b/tools/atomic/sparse_ops/utils.py
@@ -0,0 +1,23 @@
+"""
+Utils.
+"""
+from __future__ import absolute_import
+import tensorflow as tf
+from mpi4py import rc
+
+tf.get_logger().setLevel("ERROR")
+rc.initialize = False  # if True, MPI_Init runs as soon as "from mpi4py import MPI" executes
+
+
+def ops():
+    """
+    Return the custom embedding ops library.
+    """
+    return tf.load_op_library("libcust_ops.so")
+
+
+def dataset_ops():
+    """
+    Return the custom dataset ops library.
+    """
+    return tf.load_op_library("libasc_dataset_ops.so")
"a/tools/atomic/\345\216\237\345\255\220\346\265\213\350\257\225\347\273\223\346\236\234-tf1.15-rec0630-cann530.xlsx" "b/tools/atomic/\345\216\237\345\255\220\346\265\213\350\257\225\347\273\223\346\236\234-tf1.15-rec0630-cann530.xlsx" new file mode 100644 index 0000000000000000000000000000000000000000..195f0ed29faf77934d67422b1775a106b03db272 GIT binary patch literal 32363 zcmeFY^LJ#?w=WvowyI)V9oy`nW81c^j%{?D?%3(rPCB-Yj`^y;=i$C_?tgIZ{$Y=* zvG*E#%{Au-bL}c+IY=lhFc>g6FfcH3FmT+-}FeF$oFibEw2pus8dsj1iR|9n~ zM>7{aCQmzCvLYx5ngTEg(Eb1C_#eCi)nDao2wBm(VP8mb^kOs>QA?rfT%=b`2&EC) z#^#pkt8ufOtUq7l_jy9MJy|!$qfYagAvA~EJx+hY)D77!`A(bgNLYNFnNf6;;u7Q^{&w% z>rwczRT>{eHWP{vo-uuCRKXRWm9LYxNw2+3meujzExHJwn$++O9Wj-;*Ox4F;9$<0 zMaAKZSW5;QO^5)P*HW2nCaVwxI-wYOg0s1!C&@`wRT`j*<=n2X&h{NMy#BVWYN=Jo zmuQ8+l0?01YplVe&F$7;;!Gm;?15vXt)ha~x~qPt;1w!hBeou834loZL3bU9vP%G| z-IFd?&H@>#TLbP|!B4bu%fAnfEX@yVGJho!k>Qit`vqgGVDT7Y(*%{chH`9wh9#+M9O>)_a4;9L7@w5vG1#2=`1dY$iizQ>_VOppFgY;brd5 z8<6OmSyW!hU%a+^gB&*+hZt~|P@O+5wjZQmV4t6mV9NhLfH!?*0o;MARvv_3L=eCY zoXu=qn3?{a{|~VL2c!9a3B5W=L8+ewDf~+MEn?(xapOChsGNt0bO*V*f1u1dYExV> zkZ_}ing~suAOun>pv(VlY;EI9-02AU!!Ad436S z+C@$H&lc{2)8y&wfTgsu6J()CUitI6OzaUCV~dqq-w|8#`)7i$YL>iKKaKO8`6#>$ zOl^Pv70>R$eD`Hk%o$Oo;J|odo0b};EVvK&*2r-)p6fNh4c}iiay=RxLt4HG(z^c+ z5__`C?=+w*8c<+h1YmIBp0>>Yg%b}4XB%S&2b+I7`2S!A93+OId;e!2)hP;c11xCa zSHYj;`}Xs-?M(G2Bv?ju?ZHdm6@`|$Xfw5&J~x$*aT)HD!4*9*OIalXPt zq;e)vt4!eI*)@z|B?aVdUb^Ry(f|>F#kkpq6mN=uar2{Lz30UXqpx+ zU@BC>TfvFF+W*5D?R$bSOXsHn9jQR%5SHz8?Yc7>pSgdTvEkx=`Yr#hr)Bc5Y=6~a zIt-Ft6xu2QWc%<#^BA^p()7T{yt5PWJ}u1%^eC5spxr965HV1I&}1p~O{E*>S8p9Qu#q3f5*Kg^|-6Si_NR(5m=c1};k_7iDYrg&vvZsHzM`On65qSamssCPQB+hfp%#dJUUqDw;K?MY=v;Vcis=w+f zt+8PFLNs@+FWFHKWFs_R&x+y>NyfQ}$nAV~~)Sw-uK8`26GkSTL+>t!F zZ&I~E1uMxcm|LYngh)>BG(QyP)a@ISJ;~C>JI(&hk=4+blC##1AR3!oU@lrjLOx&D zt6Y^j@~nGcH(7ci!cmt2{ZfV%hd*=r#_7m8f7X2C@|7FRiIL9eicfie(aYT#kZQh! zJ+hg!yk`AGbq&BH*>S{R=;EowY15FbIR_Vp6*$3tfk%P!!U(S_{FrWe1KEP6fT~%vi`(TU@Q1@d`-1H&d7beP6M~T! 
zs|WzE5;=gaaP6=h8@oE3hd>QWp7h+evMabwOJ{|hIEjH^NVs&aw3|?$b8xP+4o6c?Ll#&EkADhfT-3rk3iMr{fDZ*=K#9Cs9}i zFx|!ej|OMD&!#Ce;tuNis53P$*rY&R_ouGV%>2ssZ?!6c$=rllM2oQQTE+Xjr} zrHIoF6XZR-AS!C_O)Z|TE=4IFbX}%^z7)h zNpq~pFBXxQY&7|JtJ*P%*d*sa8a=KoR$db#dP$&o_GrJ@P84S8&i|5yM^TfCaJaBA z>A;C)Aj1jWU4i?tFdRekaKeN9&Kf8BiHGzvQYz+om&TpT(IM+RV7*08z^tEs;uk|^ zvqjc{jpvQuUKPOncZ=U$Aaf+47Hog*i312Z{~cS~%yJh(ApYh>0Ruz+FKoHEdfA$} z{NwI*&B^#RF0^jkV}7`Hn|qT{diXVAc zf^V6X^=k%4;t4u^%x0!_?j<>9 zH{uGkD)a*++)EO-$*O9ligOR5h9}imimM@>KhaE^i-sG;VmF12% zRRJ!xA)%p`_0q{&^YRS$QaTWMoNV99epIt}-AX(?4PlV66y6!Gz-wt>ju8bb8}+8K zIKAB5cSBC%@5f*-op)F~ek<_9qeK-V#*IV~`SMj*BqS15!bps;&^xdDJplq1sseTLU=`o`ipQhRhB zH{lHD5Y)U&Q$H$rM|btu#|+D7`Ka93&d5VGg&MgUli8$FsXo7B3m!Jhdd zpX}Yrwh8f}-vpNb+WRqnO*gIa6AJmxGj&L%&{JTS2iyacvLzz?;+0KIhT?^)FU}U; z`FQ_$7wY-sW81WrqfwV6%xb?TQUo*a;xEf)GGQUjIvMu*;lSSP;2(jH6PcA{hmsLt zCr7_F1koieX;J~zc0!n?UCmR)nrwoxNjq^mOWyr8<>w7fn+RtB|7v~HUg6PkXh6mM zR+6T)ah&0|Q_Yjbe1}wnU+F9IF5&TaW$c`88BE*F#qd9nFFu#k!g0WF+INCGL%f|= z^ZhV4Uf`dL?vU5+D#S2EsVkjAn$`74aS$JQK1h~K*F^2kJl>`4+t|>;e29Af;D$R^ zLSoNtH-(NAKG%LHIXKCHl1a@;i=_1&So8T9?7WiEoH`^MIOB(#_2 zAShSx8g?VE`h20{qw!XpevFq3vJYeES7(_0>zy7!J<_9=w=ZI9dFnfZUqG$K7u zhAW^eV;~m@_%EjUr{XS_W@fG~|F?1c&llGJ*EUus_9}ubwlK(Ii|zBvbU=z)o-n~# zY2uM?m)aA~@~POOz4`>Z7tRBjav0Wb|0Z{B_%Sv-mr$_&iul827%6rNNFdonnf5?6 z6i@WY!Ay?8gg4Kl6yl>cDuNy?lh@@JqeHY-7(ZqU_aGXz z%R1al-9kFnkZ-sWNb&0dci}l`e|B&u2**374NC~qs;hR49?qmg8<#%qU3HO|Ds1|S zv+kDgASB=%RjsZs(?s%3LlRfOBeCTH?^$ZDMsUUtHv=mnMAEd7!Dn$CufR zgAYm)OcgEsp4N#aYw)5hi^#~)i2BaFku)=&aGYp zxBY1Co62K{F(h*`y0WTpjR7#LBo|g}%i7*TqwYYS1P|xJPOO&Q2e<&3tT6dfM1Y-K2$4`>0lGRh>^66 zuwjq98{n5fLB11{W-uq@@APVnZf7Q{ZvUNgALfNRc^qme0$YGOpQM@!IhduqN%Ms> zaLi4&XS43!hTe}LP!gbJWgSz~pnn2`{|Bs;l(^mJUcbq92l;)GGkdM@rRka=ttX zJ)9r#xj%eGD)hFW;6TFD*uu-lvLl9JvZFURyorp{4?+B}2l-))(GQ0%2O&|gR@lxA zK=gDADo0%EZANr(GQ`Kdo_lLHJM$knO5!o^#u&tLOkiC#T0aS-w`S_^@AqPJQ<^%W zx`Yvxs%`6_`313_KOLwaC1=LnmzKFiD+|opg_2Y98%AMuufFsXW$3v~M7Ic5D0vjT z;fv(aV*|o!P#lQz1H)SxbYVox8Mn?iZuw%{H8%RCxGxWwwl&DY1N_M(f?+-{CPDiVGC#jMlj`eM6zP8 zm_%&A4YL231V+i9ckH66VtFlPy7oS1bcrpn4}en=_WQ*c(yvcb1!oJ_mdSVpmpdG; zqmRpFth_pjP{Va-l}(z&=&j}OG!(Pvf2-(DS1$sZn5)4zkI;`7LSv=3 zcA!1>T=(lVJkIjG+a#aFC5BPyxnN#B^20f!BKkB0CF^cBhe24mP6gtRoMhR$+1Wz) zPT2y4P+up$gDfy&N#{pyt1c4qvvbwyzT7%Q`@t)(2fXv~HRg6^5tyEzNNBGvujY~E zoFaKFVo=cGG4>(@2mwq*Q(qamJjN8x6|y715dc-2Q8Kp^yGQCPm>>LR2_1pFA;H5n zPhVwo6;kS5Du1tpzM`ga3UDaZn()Z{;XUiDT6y=1td$k2%9S*WX?ot#tCo?}u0C@Q~Gy23-xk$%Qb8QQuCi z+Mz-@N;TJ-JdZ1lQ#T>k%8qI|$dvOKJR^B?LKQbW6$2DetUU8j{gq6uSy!!D<A{AjhWfW5?s|* z+MT?t-K;7Fq)He8v91kqEORWuxH}B|lWDKoL+zOn3v5bl#%+Yo?5A-{=r$4*KU#yR zsMKZ#`0byYtl`q>0US2Zn8$ei!=5m0fCU}gBDGdOe3?A^r*xwEf6RjMbq<*&yBNf@|{0 z8UH?V)veeo&b_O^k zr3J1hJ2y!lftz}2p^@CFZ*Ia2XR^6ZFNf=$i{`K`L0`Nb19*O@9xM0da!M)Rm2!Wl zwfl|J252Ba7xZ#b{PwY-x{T7E`csF7v(KNr9HvwhV5^CE!BU;}YSgxHrheJmOHtfP zRMzqU+1ghSbAG9xP`9x)Jg$?42*AP3?9OMl?!SNcYujz_A+5~z+5!mkdDx_iG_0O4 zAWT~OD8qT@N*1ovxFK|XYuO~cYd0$9Z9VdlA|Pt4+9>$rfMtDP--5OvJN72opo~hF z>aOg-{UY+JuQBv{OzmaF7yNS3-43+v`@fN63EGVrK6WrLI(2X`y#HoY{y&S-nJbq- zJe+#;2sz4ME*ICvA8|c8vQg8Qd~XjAAKnKAKD}NiPc|af z#$%Q)3m>l(tBQ^lw+wXkgfhQ9-94DBwtD?NJY78ao2|Lk)zjIXPtexo>)|y0bhWAP z*Sgm7@Ub||@%XX(uy!TrdwY8L4x9D#a=Nkjo=`hWO?_qH_cHEK>tN5^?LYkZp|_&j z$@jd$7@+yqv+1$7FQRl+r@yhezR}sXHG6x)ad>@}@MG^}uu4pB=kc@LeC+V<;r5jt z{JOB>)JcqPO-yG|%;X+Gc!m-;I_>s);-OiWFZkTEhdX&K5uo9F`^@8iBcsN7Pu+7o zRjCwSSl7{Rl^?MAw)-jI^{{eAkCx`(=Irx)c#>?8lJBn_f1f;Wk>AFB9H6K7;mG~^ znEV=37vS=IdAs;d*_EQCxDWmQa(Jkt@Bs5oxIy9L-RKhc<0JQ#MDNOPfgQOHfc138&t7gG0pGCao~xcI+$nQ^H=o;=+sQ$XtIGhtzy0aynp>TztlS-y2g4~X zH}gwEf<2u+Vffbv?x{$piLnbJpk;R-QQ`1!W`vAbf?!Y!0z_;ee6O~M+N 
zmOuABiurf+Df0~U4Zr@;RX%opuKzUsovhHS?}v8V+qvzBcGKJW^IL=W^GeLK#rtgo z^I?9|`1I6KKIiy!cTT_`VnP0cC7w56X9($gvijvPUMFdPN}hu1RfLbdtU=`6?9)4@6W(V&{28vnsEYo8}G>ZK}Z+DLYPvfK?dvyUH5sw}L z?|+55eZ6kE%~Uw&g+wpbV$K{sFE39Y)0qwOWJ50N-nZ#DH*2l3>+M!6tzIJ@je<}g zK4zcf4d#Z2|IL|{RP9Bygm9ak^Rm#L$>91Vd1yy1t;4Chvg|avo~W|h+tVsn#h;hf zBDmay#ZCoTPvxtb!Jmx~US=|5?{~|{va1s@Q`u>%3e?2ALnMjAQoWt70p|Te zg8480KTC1=38D+FvukbMM=eyRtP~e#u-sG@>{i@R)s7+P|HLE=RdW8|SM%nQ@vAOr z!LI%{HtXf#!us^pP*YFH5d&}BySm-z;Y{A0lh&fIaw}C#uZff0V6n;W@AWQ2fw9od ztg;)GdfA=)svp>(PI$i3w5M=4+6jzwW?a4!B%c;G;a#DvjW(w2RvFczFV`a1eI)O% zHv0~5t|f**W8BAC;d?6Of5YP-IR*A0Dd&&_vj`p)tO;g%OP0m32DKK%IAia(FIKv9 zQvPPDf8{_P|7wud$urqMgR2ZbdHsuUyEk+!B~PZqsXQ$oMlOw`4cvTwq!V}7_teKh z_g#xyj~B;|zwv%KcGKTWb;?5DjU7vG7FT`@>D-6|abgH+LK2f*&710Ch*ig*z>rG8 zK5&JDZELEm8`b@=EH9e7<79Pait51Do$JDFh+vO7yUc%il#zvghez@HI^#RiGkbw-Z+Wo6 z)SiX80=IJZSGcVruCn&j-j=6t2^-cc+nY|U6*)!W+Sa|FvkPK#05H(I7I`xx0-JW^ zMBor&YyB?+c}x^VkW_iAI0SBbx&kh>37o7N&n?t_JOMJnRg?I zOUpPeC6j^21t^H?Gx5q^_dE+)-w1`%G9hU*2>8pV+J~mvSw*MQX&K z@UpQqP%2^MfazN31X?6h@U@R_z7X%*d`(dhx>FcNyi|Lv%(aoyzU29mW@q5a7%}25 zInSU3PCFrOr3PqzLb|E!WfVdI#Y#Op3yrAzI|a#)^LHt(-pXPxM0z$Ic$-MMa{Rh; zN$f-#t*(q!G9}$Z&rRl-U;8pnkK-;Q!=$;i*i3k!^5q zz83uY>1^3vbP}2v7np%jL2uZWk+!q{P^waf zsHoXg*jFk`@Q`#q#}iSl!3#YrpV{1m|ACk=q1$+p8F5Rhc=cIc;mIc6`W!#1FXnl^ z5=$S-%q2KyfZS5<5CArb0_K!!3!X&`vD7(gam6m1Fl6K`OYO*FbT1<{^zC^2%=hPq2i2u_t=Rv&&-` z=|i%K!Pfz@h4amyofOMThsXr9?u>`i&f}GcaYQjT)vB%NDGD=GPH?M9?{LBmVp6)) zI$Bx8UqBI0%<<^%{W(4DKI1`y`fIA87%G9nUX|5-PWi=_#G?@jM{!4-E*nz?G?q;k z2zDWj7G>j(cxRgwdWc)43ya+=(qf@B!>0N2*K|`DgO%4fYa=shv?C5GNu3r5KUOxX zu#47Z)+rshndpeIw1#uMQXTbr81psWaRfbquyIcLRb;3#*D3WZCNZ$bKa}#|xA7Ir z8P+P48KM9->~UN3UM-m4guy|MC*;h3@ax^ZlB&lknw~=0qsHm(f1GB0w5FvY@G1jA zg*OhSLjcYg##v$aM|_ana_#}a(p@>q#tw1jyZ|FB|0bE}z&nSN6P!}OzBZFtyZ@Q` z!Z(d-m?HAfnDSz5FqXf!R;1&4TFWN4xU?Zmn&V1U}99;T9E5RshW( z!;+aHJ8xB)wd@+$G?`q^BW@?KBuR=$H4>L(3(7G1v?e=>wVsr%Dev!Y)CN=F z#)Czv6bwVa0B)~LNl-&3@niby5SDvM^9nk4L`Kcg>ahv9IU(hW<=@^SE>y}2Q~aP> z#;~Ykrw-_OlRsawy`_HQt;;K+)r(UMQk-XDsY03Ac&j^sW!doOksbIEQ5nlQ5XvX> zeGw`X@7Q9twiX_syVeD->;oJdU8M0ww!rSCR{$0uD0KLt;^C-Cp6} zszq%AQD${+$pmw&Yo0O7Fde|NCgWR~J3Tt$>VpWS=5&^GgcG6Q)hctR_rEFGbrp1!;@X{E+l2+EmV&X z=meK0LJEIJWEFw_%s8-PM2rP~#rq|bXpRw))*MR$&v2pDmKaxW@9No0$88}qW-LVm zOYLARmmPk`7LYhDpQLOLg_GnfQ%@Pi8l#X5(hHO1e|n(Cu>rcrqZA|QlW5yG7A#f>D z#KZA8W4V7RXI6CP^p~Od0mMv)lNBP9Zu-mUthyJBT>G6zg77AVi#7*-?2rI=M_zZ$ z=`*ew$FNzYJ&udC+Uc(=p`tV6kE>s4FE}x=Q76|mTVz}2^(Nja475SEIfDw!7Yt#n zR<=XpgN3<__{qPg;>FWi&08`kFGeu34DfUTQ(0*h74}ig@PAs@7vS5HCXyCx=)%kL zc@xu)OC71>CUZd*>M)uMbxa(^017uKk62suZv&^Id>s>IMKM5-^svuh&pDZf*ezSO zC~Xue%~3bI##T)lE2ZXn4ky22*`sY`7gzBOuDbKE=V!K%$CgWgf7K|%8Kqmp*;!HF z;~}$MWv*3TwL6rwBOR@rY>IT5O{liL8VzQOLN&;8S;K0rH~|%X5UXv22mGe}1~l|8 z9UI+%PIpP#8qjTPmAT0D&RVo{k~?*Ia8Z@ftx{`P!BA$H^lH9ktKb?^eNyWtI)Ubx zz)Qp{Z%mB09AAXzp9a0b$G)hCH4PPm$~$&*We%O?9ZXhDY}%#Xpj zOR@&R!EYUUXhFZ+8iI=s6SWncqq?ERX2xk zdth2d=?sNk36@O>&YAa692bt^!w6cE;a_yx;nT41cYqiH=xn&PK!Jyr=U5f}4%>*v zpmT#$DHXp+O-ubZn<#Ar{JB-rNJc@iPHPJ(3K`uh9lRW384&-5(0c$|SY$-Sev>D5 z?_At|!aED<(l$Y*v@DrBmOBHoaErBEt81D3{|HnQZt~+c4f>dv-*#W=C{x1YHCkfe<%|+jB=1xuzRfQzP5fA~}L^>oy$2zcTL5tqZF=zn_JpaD}XOJ?(Pmu8E9$`>s={{ioY~; zgnqs(p~4te=vvc$dKb?scjpQL)TM$6PoUTo8#{-pT&hZga23)ZgSm7U-AfEl>GUtz zlX=I-Bn(QYzy`o?o%+a5mFxO?YOOrSQe+~8VTej7i9U>WtOpRNI;Ar)zCKo%>Adwt zwSdt(=TCkKEvcnZ{Ef)St&BkhZT{g7wAn;1E;NO-dI(K$ED42O2 zxqPhdMxaigWQ~%Mv~shp!XUVH?FCOlb;5JDKlLUU2gq>}r#OEiSK(s=c`A3-h6>Nw z0ib!m9!=rQp=UOfV!-1kq0gvhByO5ZsuKk_=UEmfKo@|1ab@{e5D@HnxWMG@r<7|N 
zwMa&tST5~=ShTu=hh7`ouZKu3N>{PpCLt_mB<`J!{?BrHUucl%o<@RhjJfuc%KnC6GEgi}jIqU8|#m0$z1VE42P@MBcouWtUlYdIj} z84qU1n88AtX}PDS^RH8;kw|I3WpIYZ#(YBL8~p+J%AY){qM2t+xNjrfb(X~P8;+mi zaUtMMqU2v&6Kuv~RBQ3ioLCYLrhv=|9d`mPZ3HoQz?gs`*7w9D_z#NoR4l934c(%T z@o$$hmB!hlSh6K8w4i)N)@a%v{JN*SOtj((>RKU+au4{)odj4dEPEul+1$CmLmOLj=wi*s3St&swN0MULjZmge za7EFa(6AYu%dt_frr64rw@_LU0NaKT#_#^4%-ulMzQrcuM=(npP_WRcpkOh$r|W3j zc=KLIDy$=Zq{P-22)6l$*~0RU-k_%wig*k-4Kg{BsDd<(aYmg;3*^O6vL2m0?;ij$ zDNlb(P^K~SZ9qlJ?R=x4M2R~#_E+r}@*}DIduFG%gY7t+%aCzGCV1C{3*jonabSJbHm)UOjF+)on1R2ZK0L@>-NoJ^Aq7qPve=A$&dINfl zIdAEIF|d{{lm|UIYk*uU83byq9>+Kb@U+SAt<2ui_5c%z;230X?iA%CrjcaT3tK}c z%RL6suNc)`)N`E1+O$ZchV1{06dQmiueiJY#N!_@iG@MgqSn098jDOKlG2{fTImwD zHr8&iH(VKz8{2AKG4sI!wKwGWw&DNU8~0v~g(Z=-NQzk_=$PdkibO3aro?@Bxw7GU z{5O;4c+O%ELTU^NAmyS#v@{4+N(dFQLLd2Ju85xT!t5M_uL4e zlbIe|BvhFb_wF$IUK+@`3Fj=eJTA#1S11XSO#mvcm892eHH`aTU=4A2=yrlX8zkT{ z`YsBWSC}zKXth-Z1LOhP^J^m#KspSHY!2>Dw8X$r@-TFfmGuDmU9?6@07jsD2~5

_mJH{;_)@an$7~5mAkIuYSJY|g=z{73QpSur}Sx}v$*=ko*#v>H&+0&Gxrcy zJe1k^gE~>c(P}*Z`W>UYbzj)2*~ z>6E}P`P%QQO@A{~CJnPL7W;&C2b_OJ=kGhX-!s1aTOg2_;gOC-VV$ZR@S$ zK}d?x_UM%~=m@>OnW=0iw5m*tt9a<*@ygVR5=>$2_;yqO3QNv;pw8~cKhhS@wEX+w zj&Z}_p0v8JuHekXjp6t_Zm`d$N!Qyy#Z^CW@dw-O&V7Y7j9p|!cmLlGgZGNY(tY&O z(0%jZ7pr9I)h@*%@$NC5%cN36`Vjhcb^iG;D_6hcJJd#4c7VS#$(rlrjg%y_1^!Jr zNdR8{Qrhr8I}GQ)K^^-F31xCAmGtnY^S*h!_H1yJiN=L*)w&it*=;M|B@8tlGn-M_ z49%Xnw5Q9=5#7U=0Smf^E}rJ6Fiar`dIuJRf1z(CYi$Xg0r4}6nHJ$Al}sTo)%4jf zxTjk+3VGg75&|lFUQR7~{CwOzZqKqw^*`RK@;^RM1D^W>-tMPAuRkt9yXnK}%t{~O zs`8U}3CgbzyDw`!9UU+42jl5K<_ccV8s@p#(o=J&+hlOYLS2Y{TxnA*IZGvpX2qIo zj33dV{H#}VROi>4Cy_~n(pt&nzIzG$sy$Xc!YhNp<&_XZ=fQq~q=nrP^g06foVYEt zjsb*R35Onm2d^!<&<{HZet*`#+-m~0y`1;$^#uy3_J$UT=s8zX1rbxkL8WH29?7>h zPLvs>Q7oF%rq)2}?cPK=qJ5@rJa1ZCco7`*yYt-|pLftcOsotVIEO$hyf>Ex1u=td zf;x94#O@=-Y8?00n$pzbUp!LWz!qD|75QC!;5sLa0fIwcCvng;P<=`~z`y}wTo&*#BsLv@?;|*v+C$qm;SoE8XtF~#{jo74sB|?att;jZc$G^k(MH<42}6019F6ITupEv1 zA?Z?^I}BySkcj~V?R!i!P%S~ShDnhF*^PtU?=fZ#@`VyY3+0LJjbq%Qli%S`o23`= zoBc4Be}{oLv?shqanRT8J``LVdLok_wL-GF2jEfKB-=Vs(7x^EHX184E$@2IO?|XE z;;$d8=19ZZijcB7?*Eh`7qMpCiKWU@nL6&JqO$2jIl^D@GWPqv6N<)#PM5qM47`?) z4~7iYgLbzTxO`s`!VPYjz2JF8@fCFu`UyLKR{jf^IysU8@D&1MwsDQl(1%B7Wa5uD zSGSEb;h(`W4hs`x3&{$i&)D=P*7|fY!S^+NQEWw86`^}YCrT5kNFR))Tw1`i#i|X_ zX7@DIH4LC(5D!Km+jLJ zQVm^h`@XEI>3O#xC2h&w^rKWJB5>`nm2UwsMHs1Q^vNj(9Zx;hUx+iM$AMHZc7&a9 zn0wz0Ag59E3h~h1Z6WkSBWHt%(?r}6S}V1k4Mn1iZ_;}+z0>BmMB(k`1+Keh4x5Z+knF2-olp91K zMnxs<%JQOODmuc;Ba2e{t*Tooy|83D(&2E$=phup|Dl^)E6;RR_AwQfb9A1G4}mOr z?S8eBlM(Q8&&nYtkb&F2GCUZc=yI$g_R_(UCnu1RE|jJ~YjQ|3jB#Xc#0Z*kf5$-Gej@9PRul^^is+?zSMMA}>u;wpE<*yJ@A`yPxtsu^#th(9ULYCqJ9J%Q z6;&n<*{ty~B|5WNsHaL>(581Z8aN{QT{DI5IeN%uuw!98;nZc&JAzECDm*hw_e3b7 z4z;gT3S7F*;WNPfv?>V>w{bGAY)tG)_wP+oG_aqDm!&nWg@@%B4U)Pbe%tXJQox@_ zW*xBSD7&{HA~zviMp!tE#e@wLkKZ$t+9lu!3Rica}?7 z9$Se)q6#~@7PAL+ER@^s2Ll?+*!8H%>Z!##9!W`AmG4SenF>`v3{Y_n%h+e^C7Ho{ z+S;)=Qu01iT}zDMK6a79DQDoi`~6%J#~edFnl59%|6)TPRsRIRDt>C}>}8MC1-vbd z{?1ItJ)<)XqyQV#N6hubyrU>VB%TnR{l*k#f{W1Gdw?Y>2y<*!qY!f=gAO;Qt`?As zN#0Bv!W`vVK~WMjhWwNU&RTdwT+uIzv>^$LKy2KK!=#Nz{u5C;Aj4L-y%00XoGp#; zH!`@-bB)1+dTBAP$atkW8*_3*LDfk7FZRKM81hNh~t8O5EQNOoQ&j=IvTf>5o? 
zr@t@cYHFHf4Mn;d|Glu+G%GWuP?2NFn>9Bcupp0)SXrJVjo)Yoh)raxea5yZ;KH*v zZQeEgmv6{X(RS&MVhzOD9{J)0DR`4pi&O14R5q_&m=pwlU1HAhsfV%|pln&RG8-MY ztwUElW5>S9kG`5#d{ERFOl|l(^UGssI?Qg27Wm)BCXWNz^AL6xT8p9NOu6C|3S=ha zv5m~t?E_m)%^?iPP;050F*n*5-G>wvlTgyuRN?2+VtZ->Ef8t4GQFvELMw6+f?0Bt zc(8kaDHd7IaQ#qCg^kKC^+z*pUB1Ol{=iwo?Fex>G5iIF;ms?JLE`XbUtZW|dV#%^ zlJlu;B?{-KxI~IpRWcqZ-USLXUA(|10WXrokW~#8mIrAOyS#XHY<{=^y7dK0MV>Kx zsKie8dxoJuphb`{E(;VL8ylDkbi)7-X-3YTiz0Dy8mi!dSo?L2`?yV|foo*AjvHWT zSOS4Mp$!{&`2iKM#;Z1M>J}UF9gIz$7hEzV;}i%3s?Kg1`-xPl{@_L87#}AAI|zf& z;8}F}ZN)=PxdHvjD+%Hwu{=DrL%szY8W88nj5KfM43MR1fu=V4S(a^)ddcN@d`t5*`!F zNY25E-8Dyt{F0hjeH1SA{j4sU0?g(!_?JNJ&LS%zf}*t{C1>#zZIy;Lhzu=}TWCCDemTsvu@S-AS(a-&Ypz>8HOM+{Pj>V19qRF13Ruv5B z!#ws)Ewa~yGD=LA*yBpMM^-M&W(;&KpkGQHgrk#;O8qC|qW`d0j&iQ#icB77;Y3Es2J~xQ~OM8{;VR0(;-u2IMZwttHM(I%!!u( zJ=jXZ9mI-(RSfu29jYE>vpkQsf#`Oy^+!Ieq%&T4=kIbVhT?Rb6Y_$sDU)3i4X85l zOj2TP{0CD@kYQXZk{yEeLlZ&L8v8wZ*3(`x%6vGPFbCe^G*d$qzU;fYgA78>bE*J9 z-qq3~z!ebE> zT(5;={&3zKl>ni2HPqyS@_#?o5A``NGSc5j*K?<*qUU2O9NH%R1M^-?N`%Tg8V9=r z{0s>`1ZFVKj=}oUhELLI+Rv_N)j($th!xM?je54cSd~!d!8zg@MKE#?2ppF4Uv?H4 z;Pc{ec&QO(=H|o#M-PoA><0k&476@(UOeL7#%*weelKl%6^pPSf21S`5CNLYmfN;P zhSb+~sjoWG$r2B3xRi{EK6;^ z!c5FHBj3*u%)Dh>3R4@5%8GpU@qz{REqMVoq*y47gnjq%C~ymV(*v7ikYwB4QF&SV zNXg!|Z!Qr@*Y<-n7mb-O7}9P?&ks6Gq$BaCMP)avB#$4t%Jlx!#d5RsFI7dR1GEmK z+}kKx5+JAm0E1q7XEu_a48S3`R`}S&?|8&J;F7i6L8d8Z5^Xx3hfrHOCiXDlY9u;z z_2Hx883u2cjG;16@`U7u>Ldhvi8(+&CS5qv%xElI6Oc`TI|65=^zd@gA^AGu2#}#& z+}`I_n>G${KB+KGPlFm=Wc(^>qGpA0H7Dtbn+kdnWtB=+ET4}!>jPZFD8d~TF51)H z=R8iuf--|08;^g6?kxZ<{&AeTxXmwt2bP!_l<9}0~%ubFG_vAGK# zy^bf{i0vUvLchj~Lf~TNa0U&0Kdg{+;j$a#CX)D&@58j0zN{t8nBX z6HJf*qH~uW-F9qJ6OZ4r$E6fI(MO8NOcFcxK2&vJUF!#4GPUL$&I$mFT<)I)G(zum7Yt4TkbC9Ib zR6Nm6K7C-Qz;3F54t2(*rp|M_Yq}qQfwOZEsS-+q>?@`R$y0*XJn>(WldY*#N5C3X7FgXE(sXWr;luB^A6+Y%Xm(&>JLul9W#Zs1Nwm1dqbPc5DA6o9d^; zG2Ww}MJyL97O@a|_G8+V*fQHZtGX1RS5lOJKOC-8?iVjdRQw5rYwTFRtL{5lD5EOY z(1`XT?q=eaZ52O9vdC8Qj$A)9S6XaE!|$tNNeVZ4sWlMe=0+qZrBWr*i$n?Fi5qdjPzm4l155{N`#|=@K+qd zv(whKrJqV5F7kNo72HhhNR!=Y(pY8d`=JDXnL9b$LQu!B-MViOoXzKTJz{T8PccNo_s}XW zU+IyvIXa6COXL)46f~OYO_FcVt~weK9Yq8&I#XjJE}t(xP++%OJSIk@_m^I`SrAYD zhhlda{wi@!GHu(PxFaJ+kV0rMo7*qwJ8pK6^+ur-4q4jM64RoKm|vdlQZfeV=1%rL zjIUfYMsTMV za$+;q`_fy~tgM!6Jc`c!2ni`CZX0!W)}W!=c!ePCiv}DXVfaEE5Q)&_^J^zX;bw|{C0YlL>BNRpV%l~M#6BYHLPWs`L1u(t z69pYY@H15X8}r#`ArPXViHklu+S0Q)`{rge5l{0i_b>%Juz%XJSdO+hAAgR*%KPDJ znvv;zDTJ#Iib0qiJZFA8mSFKGFUVSZwa&?TZQQ+^Gwn-5kKY5lTzGn2P(EI7|eD%%aO=?^$ucNItBLnZFF#ufE=X7Go z`}%BaZ*_`Ou<_Kf$;`Y!I&^Me53H&C} zyTVy$BnjN6d-=mmue^Qr7rvbyOIQ0+(|hQu8IaCa-X(gvl$&yd$s+YTA{w1O@%u5W zTVqXwdcH(pW*)gj`k|ebH-u+}pDIqz4jtpuh}l+M+NXd#l*%VE*|8RO8&N;ovut66 zC78K5{ug8zz79CQO~#{6uS2ajkBzAD8<6XW1N>v2w)9gOO+OLYau{GzFoPk2*0Ch>+A#3R1B54crHjkt%P9=NT=p=POEMfYj!c?yQo<{MO7ZYbXw(9P zaVZ}XcobxyHYd!fXY1C&G|NeY3Gx6jZU_Ug9XjFfFi@ej_-&X7;mes3p@ok;Bkxy4 z?~RsP*-N0Gd)ig{C=3JN@HAH$MO@O*q`!uj@)?uG98;$iA^=6q4uV%O@cg4rN&m12 zMWde*L#tFZKV?28D>=I(TtHGZxpw}CpHzvL*P{`0^kxtd$7R0URNe{xqB{NcO(*uA$nSW{HXwHX9Z2fy zBcPBilPIVJiP+<3R?U;BY-QRzU9PSI+(E%GP|6qc*PE?3mBkMI zEGloJe%q_mWlBzaU6ddGT;v#04v65ygi@vF7U?=RTXm0WJh7_qm3SCWmr&!fEPo@`?(XjBY;SA*s)w$ga8dUz zGIS19UcN66)oZTvxyqaVCezY%wZ40|xDOKGqBB3Q0PXFAp!@Lc-L;PGKAlTO?=VhI z&hu;d@CP3!clbh3EDj+62=Z|{_24mYcXIM@arg3ZP&ik}^sKFGYH6yiFKkh$AX?jH zG)QDALhP7W)VteSIlJ3iofvSP!W3N}_B#=3z}+1~b-9BDrOtsCK!-d3zTd`z|LdG0 zVpA|GVmHq7|LnJMvM_RdZ>r+#XlZBuS0#eHQDzxr=I5vfdI!AzvOLqOsLy)xMTG2~ zhP6f;Ag&OpL60w*G=ybh zU9UgAV?eFgI3hw@vtm@lM+NNcm)aUV3S@|!g5YA?J^Astx>582W@QaJ=?)2=9p1xo zM#`Wx?10MD$_Pr%4`K9=+9d69JYR*CMu&TP$4ZjZhUw~Qbhe&F*P};l&GqCdzD z6w_71dBuevMe%%j{H3uV%Qecs?RaW6 
z?uu4L6oC*WMM8S|pMSu_$i_s(!pPF@_hxLHnE|^w^{0W$UcOMjEH4UdDJ4@Q-caKZ z{dXP@K9&7MF`fAumeX`CAKGbgnI`v<>G@?w<|UN7e_%r}V<>e|0W?*Z*tg3e;4(L` z=?FhMaJ~dL-0n_g`Xtq)ZeJfB+4HsN%Gb;ZDopKeH0-xD)Cp$n`+)fb^f*ra`%XVx zU)%bWMlH=uWsIbggjSmH#%)-3-<9yV>J2=B z7BUeJ>IOujSs>4U$jJ*1>F6h69`^SG7{_ekrGfg{R+y9KlAa|T&@^$`xk3O8= z$E0(neWMJ+|9A9bCei#gaF_sw0zW)IULqYoSoae`MI4d)S^qJIoIc_0Mk$2Qb_|wIZLO9joU-9m^u2V6?5${*NA4B$4!29 zDMn?#5)liGz@#G9W*ImA)uR}fO(&ujSdRIgc$Q`S{jWa7q-Z8&f;VQHOwFwwsVyP)mO81nqO+~nD8zJeYFpPKC2021d=)S; zuYXiqA%ts+J0=f2YD@V{(`oghXPIpt*Oh>SA&3TVBu`GMDMG8VOfoJh&j4%aR+dxQ zNht=p^3+${||a}DVse7sAe#bkerrs2it5|>2Q{?@F?R&#cOr>^4=oiK>2 z8kvo;_E8>|)NHZMju9m8HT&KxrZgUHXx>bzvL>K1#;zbtZg830LzNH*F*(`u+^RIV_?|y=H{6=$Um-5Uaz1To&EazE%B;2 zb{08S{^TB@0c<)tfNc**zL;yW)!U=aOA1dlj>K(}p>7Zc&GNrK2nO!zUT&!=usqpm zeX-l>GsMsk35PdaWBi*1au(zUw;?Rg?{Z?~h*DxEE>6xMTTR>lsVHXY{NDCYT?j-8 zzW-ZJ452Omq1KRx%$O)k4NJUn^XY?bPop>8v2iE&SaurLa`=~R^4}b;;+5ox zaxewwrBCiqVMDl34R#9xZ=z*N?Y~P(56^npg0`h(bbLJ`4Xk3k-e;At@s6L%(p5 zkZAsMAF@Zd0M73nRo**0|MmqK?_DzkjV(=fl+%5f*2C_o{;XLO!`77+MRjLdr@!=faW7moG6svisi4(mF| zCNpY$@=~AJgV(v(xH;ugx6gelK3O?K(>#D^fL!P19f1MoVawR8+2p2q5?<90aikmD z^cNnT6ed!qj0qLQl{Bh^+;p6dEk`}W)(u(g6d%_d6+Z3maws(6O8^`oW?Etl{G~TH zJu4|~jYL>IhJkl2V^-)m-YIMzw{J)6J1ncMXCa(PW!!|I*s%{wl*f}IBX}G^gl?R` zb$yRiiQC|>+A5sZCVEn#hSJlY=*4PYS5pKC;x>O_e1e{k%n7d&m7j75NUhQtUnz$$ zS8}r_o62twzZv|zU}Vo;In%STnq6O^MRzFIX21ZiIU~6sY62ehczs)*HOMI9cE#F- zopY|K+HqGOp4W7NGWQ}ed5;cCDqfnG8+8dUb_@YO5cKP>UvGri4i{|QsaVe|^e4x5 zWwv@b)Qzx5R27oH`9=?P*>9|u2Gb;}TuQt56k0}Shu#8+-=vYSm^+}T7!-N6wr7R! zmH@T&_UgX9*Z#D+YAa51WV3I#!hhUsdIhc4Y!zR3#k*VJec^U}xvyfIp2%6+t-<7G z(BXA85My$e774wWxBk-b{Pvf0hgRlz2ab1UEWuJ*m-edO5s&#+oMlKc@mXKXmTWqu z?bn0&4%lr+7e|wHar_R9L4S*`Wt!*euIx5*xBVfkxeFjrdd*ik_M?iPiu;lU_ByUN!jIG9;xyUM%rm)z$cL$&+&E86fv&BLo%VzKI2AC1O-D|-hrF1K%08@Q;Ir15w9{~K zYq~)l!JPZOI$svb@Yn)O^?NT%z3jHAZeO}Lr3g*tCt}5u=*3rm4URiv8MI`6YHb0h zeSda+Gxu~kD-Rd24XYwvZUd$4!ZXvz@WO%LMUhFQS#UFHgaF-mW! 
zBH)`3$k-*|AFhR!F&m`u{B!T06~9TM-I8E)%E^+-~&l0HjW(8g{3jnl62p&OLE zBcd+{*$71N7{BHReW^KZ8%y-yZlG-E*mnpOL35-pcO+d#w8I?(T#k$|@f7~|}@+s6W21AZYQ0HTqX;M!)i7qQ+&h}yW0be+G$+RTIG^tHIm#u)4$10(1ldtSSPi<>FDyrq{ z=pH=N53cG8)t>BIMMAuy^#zvQOibd{;crbzyoqMHMkyL@u|Lt}=cnd7X({h9N&->4 z49?_Rd>%h}l9Kq85)$IBJ^7K$4bpPFu>l>aScnCnN?b2u*SKzr1zsx5!m098Tg#PdCIV|h)BZ*NwG?qt0$nL)dqbu=gt>@PdS z8J%r{UuS0VieFK!Y3!TckVdK@YUE+8_^<7Nb7G%`{Rs4}&pPbCrr1Q4Xdqt;r1z-B zQ9gLBYBz^Ef46O@CIlx$q*@NAS`F{lq1^7hIGJN7nqt3D`lvS~;tL?|{$)XM^bkN1 zY~cMg+PuU{1WQ~6tCN4qa9+>v5P+p|r#P|n?-jSuUnW19ML1N|h`{5rES3Na$+43A<(nl<(xNmX#!9T1z9x4u`?-G?5FAJ_UC z*k9VU$qtkxo^%)lb6#auaAXST`?$4S9^Jw#EcnlI0~_`Oqv*z$3 zPlHc@8&pVVCJUlpF>-^*2hvzBwx)e+ImHOQ4vtg(g3GLoCOzw)gM`G0yGJ!`33qW7 zY;L%CI{3_-WlRct8xPH0%oHBAoek`B%rqiMNMETN(x=&{+4yS4%DVHOn?1NpZ~|Cx zo;1P(J)MbY6k@)5`jO|>03zTBd;Q;a)*g#G4Q~klFcHBggY|KTYHE#OPTduo;aKZMgLq`%n^pxn#1(ed#zz z)p1m`^$VjkHXNvw^7OgydUzDrAZKcTUO%hbz2I>P2^JNPuSnn4S;11^QD@tCTTD&H zRx#f^vb6$H)P1au=|g1N^_Zn$p?P+7W8T-dp!<4QhF++a7=UjarrJB|onENco7)YTHju`!%)y4Vf+ibFgZY;YTnyedBMl-xZos{nIOxpUUzuN=V z$IVv+6PyV3{rAl4K=GyCK)l0)n0-V@v`Bx#+uwohUq<<_==Nm}B@%7cOT;Jt_m^p; zoVEY&I?@5RI4tKxE>p#!4h}s8XTvg<*;7wpFPMm>jOpU;&c(sAXKZ||{nsNI0t0ED zv>WW&DDEjjIi){^=1BZ%rf8H!R{XyDC5_Od5G;@HlFzmRP@nnZ{2FJcjr|!AaokJn z8UKE9+^b!xbqJq(hmlZO<2vmikT9sRx3_d-F-p{$w!&Ikl1K_3 z3Sm*#T&@Uh`Upq%UKDN98H#vo2$&+(&E+dg;mWh}WgM%k8<~ zVxwSF#?(;?lo9HuactKV;ktQyFbmy>R_Kpk4}{v^1n}Bc^q#WrRX1TYciPOIEJs??{h&GhXlq9u>H*#b>3wHEm{X=+?ZnVyGsnmMT^%B%NwJV(@G*LoAyGi}D1AQ>MHH3@V%(05ZhaHJCB*wp?ui<;w z&{kpi@VB5ZQh94+EoM;@L()Z>A6AbR?D1Qe(UweVi`}_M7f&wAt>bMatPVb)0@Q-T zPF%(`U&wJRsTP>}S;qi--LZ+U#_IzEi|uFf?c8a9UYtKkNA$q>ILLDP*>R1aSj$vK z8Q3!xG~e{GjVD3VUmw+$x}SdHtP)*I{RO>)MZuCcQoKh-b3W+eEZQ#z-eRaey;c)e zT{$kY>nJ%_2jjk)UoQ<+#ox;uk+Wu#B}Cn=IJlgVtKuDl zA9?SzQ7D|f{Pfb+u^B6ot1IJl#kN$~c39kx?pHT@v826p@QX_IZ>amV zd-&OvGFn=5#3?eb%p(;DXMoXs0TGeQYW|aTn-#_a2mT-E4?&qJ#W!fXDQh>R^g8z1 zeu;c*+_QrtIfcWuIeI0XtYUOp+sM_=eJfY%S6m6Llrp-VT7vv-EzZ8!LU+m~f&)+Y z$i8;ppLq1+nI1E z`{9D6r&dR?9W~?vU$KumRlBsyPDQ<|isA`*bmZs7B^uozCG$pPdG=-v-El_=c;tI$ zkF_VY<>HTmBbVakR;E<*=Cmxg#pHWy!D*h@F^$WyxJKM!uI@mo>TR>+oax9c&ejJ| zvqNx9Uz~D-T6PM|D#*GauE?GWr{WEHcKEQ!gxgx#JRF}J;vl`9 zVXs!5@NSE;j1Exj0Bk*F(Uo^I9s-BltcFx2G|mFkVjxbqDTxP`Dt7S-arHeKj7~tg z{dfF9z%2{#Sm&Ugwng6?NhnM2apeU6n;Ii;om@{eC`R1&p$$5Qt)ehg>xTYd&_RI$ zVpTa1TQc0RVda0!b3RAfp{4JM&&|fu!wD%`2)R75E$44@2~cJp^BS>Xc@I{YyC`{wMWfRsp!0;5LbXb_b81V~qd)$(MB;mC^#UgYf)$luAeH#ZV464JFc z!`8W*8;n&7`8Wx_@vd=Gbx=+Q_k_x?dkD`A!Acq>!K$iX4v(O#^sk3}JA_jwE1qLy zWY;|Z02X&A45HNzc5DuB23%GgHrfp>I`9>EQ~e@8nE%$HGRJ`p81^RlC4YP7<&0lK zr(U*1nA)A~DZxBR;HehLGR12~<34NPxc73O`D$kr7}Pz%R>v@}(wG>iACkM}Zn@D* zl7||fSXPo@RY84PJnL5Mb{?{k#vdHHVtLtdV2%gwojTEiIYl-Bvi!mo^{H<|yx-Lr z?E)LLgg#-it#VRkuI!ksS_(^P`R%|k`HSQ|#`oK1D~6o5)AE;dZ3@EDV0V>2TuuoW z1VwQMvGX+IL4()GyQC*UWMh$zC-mS%??~BwtHieMx~ogSQ`A$!O%;mLZH))%F9qp` z0l3J#vQE04`un9Vrxt{Vv1BMW(h!ZXZOheIAC-dFErq(myKg)c?J`8A7c^QhE>J#X zJ2q#!c(d?+MV;~qnatFMkJRe4Ch8fQm*08BonDHIuM+WV>@oPHlT9bDWUJUXX-2l6 z#>jy*TS{I^bn*StIkEp37kiAf_Si}=Jb!zfp-4e9vKjE?1g?8cfO6Y?PI1K08wwb} zPBVyuqnA%vXuV(a_O$zB*EIBL= zFRXtTIUK2F6a=8n1mvoqho5%{>BfsGwANnnco%D+`xJ0x`_qhFtgSo= z=S3?iJki6dk~v4-oj=rQ{({*R?lkDVO1R32pUA+P03WX{JXC*@b^=>L3IDF*=~OJmIyxFR=GX#AE?1G?;?P@2};a3=uO ze+!xa4*uyr7|zRhNm3;;d$Y`3=49azV_mr4{gmt{*-`u0iIcQ$?)-u6t)ko6kzu2I zx&AW2u^-_^A#oVLFtydXdj zm`k(l)Am3YN=Yexc*7jG=E^x7doT^G*6aXEpH#5gtPJkFUTP6eR;rKpZEE&TQ%G)c zIy8FTJT%cYz$g~22+Qj$S!5ZcwZmAET}wSONFn56xvjRj!CJ>*!+G6g&1Zkcs*^U!(SCkPcJ0!duxXb=IHqLy&4mH<#{lfvc!84sq z)ZkZFZ&$z&q??|m#$t%#JSL~6}vc=m_R6jRmFS*-`%p_OAtiIQP0Vm5df!3OL 
z&hupLZ|AR2E(he=u>y}zH!nV!*=0;^!&LYcmTe~VKCm@3t27&K!=3?_g;X( zbXc7I($6~78&dN{as&_&Oh(Yc9-mQiLf8`RQRUXoV6LH8C+ zZ$KL7Dlw4ZMV7&Wweg~3PJE~kW@yCsHmr|UgDArs`l7_Vw$VNV$rWP`gqe<3p>nqg z^RvtN(RElqiNoesXZ<~E%3vChmg!EHiBF!;fgY#8zS#mCOwAB%VH70o+7P6{;b($- z$c4|>`)2WH_VMc86my>~pQ12p%hjqy@`46PWCCFztdo*>*h9*q!*|@B)&XR=Pp}|3h@$J zO(5PRUE$H#u}8;ZMIQ{pvSe=v!j4P{OcV&3Yb5I;8iWP6Ee4;R5%7q*&BMr&DEHyp{&bhw?B6EZM~J991T801DM}v z8=;UvB@YW@{p|CS(Z|?;~Fk8EZmR(q!v&b{+FRuUa!6+Q|%vtwZ@Z8J0aURz%EX&0l%GBrU_~KFBeDHh* zQm)SMqRRNpKgBY@Inrq$cicj#EHybS_QN6T2J3P@W7kM84sfXoqv?D_!>Q4NbR*+& z;TR9-GLGTO2y?EX3)^L!1pUh5eZI{r+YYI6O)ioa8?C& zPOtT11ijG1dEVKzzMWyFbv_PpgI;b(fO|d3#t*JWhI(4ox3eJF-2-DoQ$k(?JVl00 zi=Ma9?GM_vk<9fX$4p4KaO|^dN;%VDvUdEw>k-tjj_*sdYVC;}!RP**o3v?C(=pK9 zr+0bq0q#LIe~*3#>aup6$vMQi2K#J3h;`SG>?Vy%#V3MQP78j4za8_5Z++uuvOuTZ z23PEyhVZ_e*Dg2($=vbQGlDgI<#FcsyN=UT1lF!sLni#T9?2XRo2x5Xd{HF>CL6oB zc*oV$?@+)!n0MOY6L~&A4<@d`Hi=+uI=MSCFEc+nq}rv)bCJ6~BTs}1g<=j)$mb)l zx{@>8E36|}Z}l9gcSla0R%D~O2TurLvcSkoZij-%Z*z#tDVS_|=8!`{MQgHd%cW0O za^QI=dW-9e9qW8A42kp9Nr6{(fY|3xB0T`5gW$JKfP!<)lRMK(OCPoxoNs4V<9F>9 zmlkRe1Lz`_ICF7o_MhNY1P>pBwjq zcQO@V*_(dkf0nk8BS2?5`2B;{^G}gyC0IU{<4POL$^tmMC7zboN*r35UutONqlDc3 zKG-v~mkpj$P_~ed`p@m^s8%Ib!FvR6$Dk9l3I|ZA$B2%I<%uJ91*LQDd`Tr#%i~$1 z@Q`cmn0Sg_ul0uJ?h~?QIEzMP>sUjn3 zs9cf7xMcF$Ub_HTI6R`i5-RD{KISzzRrdJymcMFjT_?26`!yD7_#$Mca>gx?CIsZnyL@UHQkF}J8#dksozpmf~pDFEE$>T z$C)W1uUj$jgk>d36eP3I?vknADqItC$=9<{m9a$!bqmyO@GkN%y2^9dkw33<(V3D{ zk0(nlHJTfJuYLO6vBJcsG5w{HPPo#0gEh4;yl2^VJE^Il=5I#hcI9uofvFMvY-W%> zE;*j@0ISL3Xy9_9kQ!m{59_84wp{HJZ`SeI=XD~?XKBLk^Z6w-8Aqoc(5W^Fl4nEc z#d}Q~v@(+0i4$z`H?Jo3wI<${M0hFcpt5^B_ExO;NT{W)A;->SjkK=nCzP;-AK}ki z)tp{cR`<~uj@PGS_@JV6@tY3#^s3hD=FL(X%0n93r<9^;LC*u;ggqtH=R}hv^>1#p z*2$+|ax@pbdsuf{O1!pp(4MbYhDxRrb9yd`gev=973!(FP{&@n_py2!IE{n(L|JX9tIM?wp2<2B<$AACB=mB!CHzlzmH5z&@ zlc-(d)oWv1`U{NShTgpP9>Pqx6Wr}+>~PcXT0?20Z=53Y(X(goG)1#KpLa>t3b4_x z_*1fd&LerdR4Ac9tpG=wP8@1U6S%T90LW*0U9VJB=nDwe7e_Vd(jGj9Idm)iLLmbG zL_=?!mPnHD460$t=6H2W@swT7oo6t97#vM;_FyAQK zHJa31s)9D~COUdFVQti}Vwv|alyn{@%sAnA5NZ1xFMmSkGm;eN68AT**+zT8S~*^@ zpCLzn$9KDIaddjPy39HJWC>OQLP{3<8;Mvm?W9?x-=}GW<{BQ0{pZ4>)!#E5$5A>y