Source code for viiapackage.results.result_functions.viia_results_pushover

import json
import math
from copy import deepcopy
from inspect import stack
from pathlib import Path
from typing import Optional, List

from rhdhv_fem.fem_tools import fem_write_log, fem_create_folder
from rhdhv_fem.fem_math import fem_greater, fem_smaller

from viiapackage.analyses.nlpo_helper_functions import viia_center_of_mass_nlpo
from viiapackage.results.result_functions.viia_get_latest_result_folder import viia_get_latest_result_folder
from viiapackage.viiaGeneral import viia_find_closest_mesh_node

from openpyxl import Workbook
import numpy as np
from matplotlib import pyplot as plt


def viia_results_pushover(project: 'ViiaProject', analysis_nr: str):
    """
    Generic function for producing output associated with pushover assessment. Can be used in A6, A11, A13 or A14.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - analysis_nr (str): Analysis number, e.g. 'A6'.

    Output:
        - None: Stores all figures in the associated analysis folders. Stores data in
          project.project_specific['PO_data'].
    """
    # Make sure to run per load-case if triggered after DIANA analysis: when this function appears in the
    # call stack below 'viia_create_analysis_files', restrict processing to the currently running
    # direction/load-combination; otherwise process all load-cases (load_cases=None).
    if 'viia_create_analysis_files' in [frame[3] for frame in stack()]:
        load_cases = [
            f"{project.project_specific['direction']['current direction']}_"
            f"{project.project_specific['load_combination']['current load_combination']}"]
    else:
        load_cases = None
    # Build the result dictionary skeleton, then fill it with mesh-constant data and .tb results
    viia_set_up_pushover_result_dict(project)
    _viia_get_pushover_result_dict_const(project=project, load_cases=load_cases)
    _viia_get_pushover_result_data_from_tb(project=project, analysis=analysis_nr, load_cases=load_cases)
    # Intermediate Excel dump of the raw MDoF data
    _viia_pushover_data_into_excel(project=project, load_cases=load_cases)
    # Convert MDoF curves to SDoF equivalents and prepare demand (ADRS) curves and draft plots
    _viia_mdof_to_sdof_po_curve(project=project, load_cases=load_cases)
    _viia_store_demand_curves(project=project)
    _viia_plot_draft_po_curves(project=project, load_cases=load_cases)
    # NOTE(review): viia_get_drift_limits is called here with a 'load_cases' keyword, which does not match
    # its signature (project, com_list_per_storey, category) — verify against the intended definition.
    viia_get_drift_limits(project=project, load_cases=load_cases)
    _viia_pushover_data_into_excel(project=project, load_cases=load_cases)
    # Assessment stages: only executed once the user has set the governing lines / requested all lines
    if 'Governing lines per load case' in project.project_specific['PO_data'].keys():
        _viia_plot_governing_line_per_load_case(project=project, analysis=analysis_nr)
    if 'Governing lines per direction' in project.project_specific['PO_data'].keys():
        _viia_bilinearize_po_curve(project)
        _viia_assess_po_curve(project)
        _viia_plot_governing_line_per_direction(project=project, analysis=analysis_nr)
        _viia_pushover_data_into_excel(project=project, load_cases=load_cases)
    if 'all lines' in project.project_specific['PO_data'].keys():
        _viia_bilinearize_po_curve(project)
        _viia_assess_po_curve(project)
        _viia_plot_governing_line_per_direction(project=project, analysis=analysis_nr)
        _viia_pushover_data_into_excel(project=project, load_cases=load_cases)
    # Geometrical output for NLPO (handled by the project object)
    project.viia_geo_output_nlpo()
def viia_set_up_pushover_result_dict(project: 'ViiaProject'):
    """
    This function will set up the project specific dictionary to collect all the results which will be used for the
    assessment.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.

    Output:
        - None: The dictionary for assessment of single line and subsystem has been created and can be read by
          project.project_specific['PO_data'], whose key is the name of the subsystem, while the value is the data
          for result handling.

    Raises:
        - ValueError: When the analysis subtype is neither 'SL' nor 'ML' (previously this fell through and
          raised an unhelpful NameError).
    """
    # Single line (SDOF): evaluation points follow the centres of mass; one global subsystem
    if project.viia_settings.analysis_subtype == 'SL':
        storeys = list(viia_center_of_mass_nlpo(project=project).keys())
        load_case_names = [
            'X_pos_modal', 'X_neg_modal', 'Y_pos_modal', 'Y_neg_modal',
            'X_pos_uni', 'X_neg_uni', 'Y_pos_uni', 'Y_neg_uni',
            'X_pos_disp', 'X_neg_disp', 'Y_pos_disp', 'Y_neg_disp']
        subsystems = ['global_system']
    # Multi line (subsystem): one subsystem per wall line found in the model
    elif project.viia_settings.analysis_subtype == 'ML':
        storey_set = {
            level for line in project.find_topmiddle_nodes_on_multiline_walls().values() for level in line.keys()}
        # Sort storey names ('N0', 'N1', ...) on their numeric part
        storeys = sorted(storey_set, key=lambda name: int(''.join(char for char in name if char.isdigit())))
        load_case_names = [
            'X_pos_eqacc', 'X_neg_eqacc', 'Y_pos_eqacc', 'Y_neg_eqacc',
            'X_pos_tri', 'X_neg_tri', 'Y_pos_tri', 'Y_neg_tri']
        subsystems = list(project.find_multilines_in_model().keys())
    else:
        raise ValueError(
            f"ERROR: Unsupported analysis subtype '{project.viia_settings.analysis_subtype}' for the NLPO result "
            f"dictionary set-up, expected 'SL' or 'ML'.")
    project.project_specific.setdefault('PO_data', {})
    # Template per subsystem / load-case / storey; values are filled later by the result-collection functions
    project.project_specific['PO_data'].update({
        **{subsystem: {
            **{load_case: {
                **{storey: {
                    'displacement': None,
                    'disp_node_nr': None,
                    'seismic_mass': None,
                } for storey in storeys},
                'base_shear_node_nr': None,
                'base_shear': None,
                'effective_mass': None,
            } for load_case in load_case_names},
            'assess_x': None,
            'assess_y': None,
            'storeys': storeys,
            'nr_storeys': None,
            'storey_height': {storey: None for storey in storeys}} for subsystem in subsystems},
        'load_cases': load_case_names,
        'subsystems': subsystems})
    return
def _viia_get_pushover_result_dict_const(
        project: 'ViiaProject', subsystems: Optional[List[str]] = None, load_cases: Optional[List[str]] = None,
        autofill: Optional[bool] = False):
    """
    This function will collect all the data for the assessment which will be immutable after mesh (e.g. the node
    number for displacement, the seismic mass). For subsystems, the evaluation node numbers are determined after
    mesh for displacement and base shear. The .tb file from the linear static analysis is needed to calculate the
    seismic mass for each subsystem. For SDOF, the evaluation node for displacement will be filled. This function is
    an antecedent to 'viia_set_up_pushover_result_dict()'.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - subsystems (list of str): Names of subsystems to process, default None (all subsystems in 'PO_data').
        - load_cases (list of str): Names of load-cases to process, default None (all load-cases in 'PO_data').
        - autofill (bool): Whether to call precedential functions when preparatory data is missing.

    Output:
        - None: Data is added to project.project_specific['PO_data'].
    """
    if not load_cases:
        load_cases = project.project_specific['PO_data']['load_cases']
    if not subsystems:
        subsystems = project.project_specific['PO_data']['subsystems']
    # Guard loop: a missing 'PO_data' key raises (caught below); when autofill is requested the set-up
    # function is run once and the check is retried; otherwise a warning is logged and nothing is done.
    autofill_aux = autofill
    while True:
        try:
            # 'None in [x]' is True only when x itself is None, i.e. 'PO_data' exists but holds no data
            if None in [project.project_specific['PO_data']]:
                return
            break
        except:
            if autofill and autofill_aux:
                viia_set_up_pushover_result_dict(project)
                autofill_aux = False
                continue
            else:
                project.write_log(
                    f"WARNING: Required preparations for 'viia_get_pushover_result_dict_const()' could not be found.")
                return
    # SDOF
    if project.viia_settings.analysis_subtype == 'SL':
        # Evaluation nodes for displacement: the mesh node closest to each storey's centre of mass
        for subsystem in subsystems:
            project.project_specific['PO_data'][subsystem]['assess_x'] = True
            project.project_specific['PO_data'][subsystem]['assess_y'] = True
            for load_case in load_cases:
                center_of_mass = viia_center_of_mass_nlpo(project=project)
                for storey in project.project_specific['PO_data'][subsystem]['storeys']:
                    project.project_specific['PO_data'][subsystem][load_case][storey]['disp_node_nr'] = \
                        viia_find_closest_mesh_node(
                            project=project,
                            target_point=[center_of_mass[storey][direction] for direction in ['x', 'y', 'z']]).id
                    project.project_specific['PO_data'][subsystem][load_case][storey]['seismic_mass'] = \
                        center_of_mass[storey]['mass']
    # Subsystem
    elif project.viia_settings.analysis_subtype == 'ML':
        # Check if the dictionary has been set up
        # Check the existence of .tb files from the A1 (linear static) analysis
        folder = Path(f"{project.workfolder_location}\\{project.analysis_settings['A1']['folder_name']}")
        output = 'SXXX_OUTPUT_NLPO_EFF_MASSES_TB.tb'
        # NOTE(review): 'output_name' is computed but never used afterwards
        output_name = output.replace('SXXX_', '')
        output_filename = output.replace('SXXX', project.name + '_v' + str(project.version))
        file = output_filename
        tb_path = viia_get_latest_result_folder(project=project, version=project.version, folder=folder, files=file)
        file = Path(f"{tb_path}\\{file}")
        if not file.exists():
            raise FileNotFoundError(f"ERROR: File not found {file.as_posix()} not found.")
        # NOTE(review): 'tb_for_foundation' is read but not used further in this function
        tb_for_foundation = project.read_tbfile(file)
        topmiddle_nodes = project.find_topmiddle_nodes_on_multiline_walls()
        foundation_nodes = project.find_foundation_nodes_for_multilines()
        # Load seismic mass from the json stored next to the latest A1 results
        sub_folder = Path.cwd() / project.analysis_settings['A1']['folder_name']
        seismic_dict_path = viia_get_latest_result_folder(project, folder=sub_folder).joinpath('seismic_mass.json')
        with open(seismic_dict_path, 'r') as file:
            seismic_mass_dict = json.load(file)
        # Append the displacement node number & base shear node number per subsystem
        for subsystem in subsystems:
            # A line 'of x' spans the x-direction and is assessed for loading in y (and vice versa)
            if 'x' in subsystem:
                project.project_specific['PO_data'][subsystem]['assess_x'] = False
                project.project_specific['PO_data'][subsystem]['assess_y'] = True
            if 'y' in subsystem:
                project.project_specific['PO_data'][subsystem]['assess_x'] = True
                project.project_specific['PO_data'][subsystem]['assess_y'] = False
            disp_node_nr = topmiddle_nodes[subsystem]
            base_shear_node_nr = foundation_nodes[subsystem]
            seismic_mass = seismic_mass_dict[subsystem]
            for load_case in load_cases:
                project.project_specific['PO_data'][subsystem][load_case]['base_shear_node_nr'] = base_shear_node_nr
                for storey in project.project_specific['PO_data'][subsystem]['storeys']:
                    project.project_specific['PO_data'][subsystem][load_case][storey]['disp_node_nr'] = \
                        disp_node_nr[storey][0]
                    project.project_specific['PO_data'][subsystem][load_case][storey]['seismic_mass'] = seismic_mass
    return
def _viia_pushover_data_into_excel(
        project: 'ViiaProject', subsystems: Optional[List[str]] = None, load_cases: Optional[List[str]] = None,
        save_file_location: Optional[str] = None):
    """
    Writes all relevant and available pushover and ADRS data into Excel.

    One sheet is created per load-case; within a sheet, the data of each assessed subsystem is written as a
    group of rows (column 1/2 hold labels, data series start at column 3). A final 'ADRS' sheet is added when
    demand spectra are available.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - subsystems (list of str): Names of the subsystems of which data is desired, default None (all).
        - load_cases (list of str): Names of the load-cases of which data is desired, default None (all).
        - save_file_location (str or Path): Desired path in which to store the Excel, default None
          (workfolder/NLPO_Assessment_data.xlsx).

    Output:
        - None: Creates an Excel file instead.
    """
    if not save_file_location:
        save_file_location = project.workfolder_location / 'NLPO_Assessment_data.xlsx'
    if not load_cases:
        load_cases = project.project_specific['PO_data']['load_cases']
    if not subsystems:
        subsystems = project.project_specific['PO_data']['subsystems']
    wb = Workbook()
    for load_case in load_cases:
        wb.create_sheet(title=load_case)
        wb_sheet = wb[load_case]
        i = 1
        for subsystem in subsystems:
            # Only write subsystems that are assessed in the direction of this load-case
            if (load_case[0] == 'X' and project.project_specific['PO_data'][subsystem]['assess_x']) or \
                    (load_case[0] == 'Y' and project.project_specific['PO_data'][subsystem]['assess_y']):
                wb_sheet.cell(row=i, column=1).value = subsystem
                i += 1
                # Per-storey data: displacements, evaluation node and seismic mass
                for storey in project.project_specific['PO_data'][subsystem]['storeys']:
                    wb_sheet.cell(row=i, column=1).value = storey
                    wb_sheet.cell(row=i, column=2).value = 'Displacement [m]'
                    if isinstance(
                            project.project_specific['PO_data'][subsystem][load_case][storey]['displacement'],
                            np.ndarray):
                        for j, value in enumerate(
                                project.project_specific['PO_data'][subsystem][load_case][storey]['displacement']):
                            wb_sheet.cell(row=i, column=3 + j).value = value
                    i += 1
                    wb_sheet.cell(row=i, column=2).value = 'Displacement node nr'
                    wb_sheet.cell(row=i, column=3).value = \
                        project.project_specific['PO_data'][subsystem][load_case][storey]['disp_node_nr']
                    i += 1
                    wb_sheet.cell(row=i, column=2).value = 'Seismic mass [kg]'
                    wb_sheet.cell(row=i, column=3).value = \
                        project.project_specific['PO_data'][subsystem][load_case][storey]['seismic_mass']
                    i += 1
                    if 'rel_disp' in project.project_specific['PO_data'][subsystem][load_case][storey].keys():
                        wb_sheet.cell(row=i, column=2).value = 'Relative displacement'
                        for j, value in enumerate(
                                project.project_specific['PO_data'][subsystem][load_case][storey]['rel_disp']):
                            wb_sheet.cell(row=i, column=3 + j).value = value
                        i += 1
                # Load-case level data (always written; series only when data is present)
                wb_sheet.cell(row=i, column=1).value = 'Base shear [N]'
                if isinstance(project.project_specific['PO_data'][subsystem][load_case]['base_shear'], np.ndarray):
                    for j, value in enumerate(
                            project.project_specific['PO_data'][subsystem][load_case]['base_shear']):
                        wb_sheet.cell(row=i, column=3 + j).value = value
                i += 1
                wb_sheet.cell(row=i, column=1).value = 'Base shear node nr'
                if isinstance(
                        project.project_specific['PO_data'][subsystem][load_case]['base_shear_node_nr'], list):
                    for j, value in enumerate(
                            project.project_specific['PO_data'][subsystem][load_case]['base_shear_node_nr']):
                        wb_sheet.cell(row=i, column=3 + j).value = value
                i += 1
                # Optional SDoF / assessment results, present only after the conversion functions have run
                if 'effective_mass' in project.project_specific['PO_data'][subsystem][load_case].keys():
                    wb_sheet.cell(row=i, column=1).value = 'Effective mass'
                    wb_sheet.cell(row=i, column=3).value = \
                        project.project_specific['PO_data'][subsystem][load_case]['effective_mass']
                    i += 1
                if 'eff_displacement' in project.project_specific['PO_data'][subsystem][load_case].keys():
                    wb_sheet.cell(row=i, column=1).value = 'Effective displacement [m]'
                    for j, value in enumerate(
                            project.project_specific['PO_data'][subsystem][load_case]['eff_displacement']):
                        wb_sheet.cell(row=i, column=3 + j).value = value
                    i += 1
                if 'spectral_acc' in project.project_specific['PO_data'][subsystem][load_case].keys():
                    wb_sheet.cell(row=i, column=1).value = 'Spectral acceleration [g]'
                    for j, value in enumerate(
                            project.project_specific['PO_data'][subsystem][load_case]['spectral_acc']):
                        wb_sheet.cell(row=i, column=3 + j).value = value
                    i += 1
                if 'eff_height' in project.project_specific['PO_data'][subsystem][load_case].keys():
                    wb_sheet.cell(row=i, column=1).value = 'Effective height [m]'
                    wb_sheet.cell(row=i, column=3).value = \
                        project.project_specific['PO_data'][subsystem][load_case]['eff_height']
                    i += 1
                if 'drift_limits' in project.project_specific['PO_data'][subsystem][load_case].keys():
                    wb_sheet.cell(row=i, column=1).value = 'Brittle drift limit [m]'
                    wb_sheet.cell(row=i, column=3).value = \
                        project.project_specific['PO_data'][subsystem][load_case]['drift_limits']['brittle']['minimum']
                    i += 1
                    wb_sheet.cell(row=i, column=1).value = 'Ductile drift limit [m]'
                    wb_sheet.cell(row=i, column=3).value = \
                        project.project_specific['PO_data'][subsystem][load_case]['drift_limits']['ductile']['minimum']
                    i += 1
                if 'disp_cap_sys' in project.project_specific['PO_data'][subsystem][load_case].keys():
                    wb_sheet.cell(row=i, column=1).value = 'disp_cap_sys [m]'
                    wb_sheet.cell(row=i, column=3).value = \
                        project.project_specific['PO_data'][subsystem][load_case]['disp_cap_sys']
                    i += 1
                if 'disp_y_sys' in project.project_specific['PO_data'][subsystem][load_case].keys():
                    wb_sheet.cell(row=i, column=1).value = 'disp_y_sys [m]'
                    wb_sheet.cell(row=i, column=3).value = \
                        project.project_specific['PO_data'][subsystem][load_case]['disp_y_sys']
                    i += 1
                if 'spectr_disp_bilin' in project.project_specific['PO_data'][subsystem][load_case].keys():
                    wb_sheet.cell(row=i, column=1).value = 'Spectral displacement, bilinearized [g]'
                    for j, value in enumerate(
                            project.project_specific['PO_data'][subsystem][load_case]['spectr_disp_bilin']):
                        wb_sheet.cell(row=i, column=3 + j).value = value
                    i += 1
                if 'spectr_acc_bilin' in project.project_specific['PO_data'][subsystem][load_case].keys():
                    wb_sheet.cell(row=i, column=1).value = 'Spectral acceleration, bilinearized [g]'
                    for j, value in enumerate(
                            project.project_specific['PO_data'][subsystem][load_case]['spectr_acc_bilin']):
                        wb_sheet.cell(row=i, column=3 + j).value = value
                    i += 1
                # Blank row between subsystems
                i += 1
    # Demand spectra sheet, when available
    if 'ADRS' in project.project_specific:
        wb.create_sheet(title='ADRS')
        wb_sheet = wb['ADRS']
        i = 1
        for spectrum in project.project_specific['ADRS'].keys():
            wb_sheet.cell(row=i, column=1).value = spectrum
            wb_sheet.cell(row=i, column=2).value = 'Displacement [m]'
            for j, value in enumerate(project.project_specific['ADRS'][spectrum]['displacement']):
                wb_sheet.cell(row=i, column=3 + j).value = value
            i += 1
            wb_sheet.cell(row=i, column=2).value = 'Acceleration [g]'
            for j, value in enumerate(project.project_specific['ADRS'][spectrum]['acceleration']):
                wb_sheet.cell(row=i, column=3 + j).value = value
            i += 1
            i += 1
    # Drop the default sheet; subscription access replaces the deprecated 'get_sheet_by_name'
    wb.remove(wb['Sheet'])
    wb.save(save_file_location)
def _viia_mdof_to_sdof_po_curve(
        project: 'ViiaProject', subsystems: Optional[List[str]] = None, load_cases: Optional[List[str]] = None,
        autofill: Optional[bool] = False):
    """
    This function converts a MDoF (sub)system into an SDoF equivalent according to the NPR for NLPO.
    This function is the antecedent to 'viia_get_pushover_result_data_from_tb'.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - subsystems (list of str): Names of subsystems to process, default None (all subsystems in 'PO_data').
        - load_cases (list of str): Names of load-cases to process, default None (all load-cases in 'PO_data').
        - autofill (bool): Whether to call precedential functions when preparatory data is missing.

    Output:
        - None: Data is added to project.project_specific['PO_data'].
    """
    if not load_cases:
        load_cases = project.project_specific['PO_data']['load_cases']
    if not subsystems:
        subsystems = project.project_specific['PO_data']['subsystems']
    for subsystem in subsystems:
        storey_height, n_storeys = _get_storey_height(project)
        for load_case in load_cases:
            # Skip load-cases whose direction is not assessed for this subsystem
            if not project.project_specific['PO_data'][subsystem][f'assess_{load_case[0].lower()}']:
                continue
            data_found = False
            autofill_aux = autofill
            # Retry loop: on any failure the required data is (optionally) autofilled once, then retried
            while True:
                try:
                    # Locate stored input data
                    disp = []
                    base_shear = project.project_specific['PO_data'][subsystem][load_case]['base_shear']
                    for storey in project.project_specific['PO_data'][subsystem]['storeys']:
                        disp.append(project.project_specific['PO_data'][subsystem][load_case][storey]['displacement'])
                    # Flip the sign convention so the dominant displacement is positive
                    if fem_greater(abs(np.amin(disp)), abs(np.amax(disp))):
                        disp = [-disp[i] for i in range(len(disp))]
                    # Diagonal (lumped) mass matrix from the per-storey seismic masses
                    mass_matrix = np.zeros((n_storeys, n_storeys))
                    for n, storey in enumerate(project.project_specific['PO_data'][subsystem]['storeys']):
                        mass_matrix[n, n] = project.project_specific['PO_data'][subsystem][load_case][storey][
                            'seismic_mass']
                    data_found = True
                    break
                except:
                    if autofill and autofill_aux:
                        _viia_get_pushover_result_dict_const(project, [subsystem], [load_case], True)
                        _viia_get_pushover_result_data_from_tb(project, 'A6', [subsystem], [load_case], True)
                        autofill_aux = False
                        continue
                    else:
                        project.write_log(
                            f"ERROR: required input data for 'viia_mdof_to_sdof_po_curve()'"
                            f" could not be found for subsystem '{subsystem}' in load-case '{load_case}'.")
                        break
            if not data_found:
                continue
            # Prepare analysis result data (displacements and base shear): inter-storey displacements
            disp_rel = deepcopy(disp)
            for n, storey in enumerate(project.project_specific['PO_data'][subsystem]['storeys']):
                if n + 1 < project.project_specific['PO_data'][subsystem]['nr_storeys']:
                    disp_rel[n + 1] -= disp[n]
            # Find the effective mass
            base_shear_60p = 0.6 * max(base_shear)
            # Find the displacement at 60% of the maximum base shear
            # TODO: what if not both entries of the mode are positive?
            disp_60pmax_idx = np.argwhere(np.diff(np.sign(base_shear_60p - base_shear))).flatten()[0]
            # Mode shape assumed from the displacement profile at 60% of max base shear, normalized
            mode = [disp[i][disp_60pmax_idx] for i in range(0, n_storeys)]
            mode = mode / np.linalg.norm(mode)
            mass_eff = float(sum(np.matmul(mass_matrix, mode)))
            # Determine the transformation factor (gamma)
            # NOTE(review): when the first branch is taken, 'gamma' is never assigned and the code below
            # raises a NameError despite the 'Process aborted' warning — verify intended behavior.
            if n_storeys > 1 and subsystem != 'global_system':
                project.write_log(
                    "WARNING: the function 'mdof_to_sdof_po_curve(...) is currently not supported for buildings with "
                    "more than one story. Process aborted.")
                pass
            elif n_storeys > 2:
                # Obtain the assumed mode shapes and the seismic mass matrix
                mode_sq = mode ** 2
                mass_eff2 = sum(np.matmul(mode_sq, mass_matrix))
                gamma = mass_eff / mass_eff2
            else:
                gamma = 1
            # Rescale top displacement, base shear and height
            disp_star = disp[-1] * gamma  # [m]
            baseshear_star = base_shear / gamma  # [N]
            height_star = sum(storey_height) / gamma  # [m]
            spectr_a = baseshear_star / (
                mass_eff * project.gravitational_acceleration)  # [N/kg] converted to [%g]
            project.project_specific['PO_data'][subsystem][load_case]['eff_displacement'] = disp_star
            project.project_specific['PO_data'][subsystem][load_case]['effective_mass'] = mass_eff
            project.project_specific['PO_data'][subsystem][load_case]['spectral_acc'] = spectr_a
            project.project_specific['PO_data'][subsystem][load_case]['eff_height'] = height_star
            for n, storey in enumerate(project.project_specific['PO_data'][subsystem]['storeys']):
                project.project_specific['PO_data'][subsystem][load_case][storey]['rel_displacement'] = disp_rel[n]
    return
def _viia_store_demand_curves(
        project: 'ViiaProject', spectr_red_facs: Optional[List[float]] = None, steps: int = 100,
        return_period: int = 2475, add_elastic: bool = True):
    """
    Function that creates (if necessary) and globally stores the requested (in)elastic demand spectra in
    project.project_specific['ADRS'].

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - spectr_red_facs (list): Contains spectral reduction factors associated with the system damping,
          default None (uses [0.564], corresponding to 20% system damping).
        - steps (int): Discrete points that control the smoothness of the spectrum, default 100.
        - return_period (int): The prescribed return period associated with the demand spectrum in years,
          default 2475.
        - add_elastic (bool): Whether or not to store the elastic spectrum as well, default True.

    Output:
        - None: Data is added to project.project_specific['ADRS'].
    """
    # Default handled via None sentinel instead of a shared mutable default argument
    if spectr_red_facs is None:
        spectr_red_facs = [0.564]
    if 'ADRS' not in project.project_specific.keys():
        project.project_specific['ADRS'] = {}
    for spectr_red_fac in spectr_red_facs:
        demand_curve, _ = _viia_demand_curve(project, spectr_red_fac, return_period, steps)
        # Invert the reduction factor eta = sqrt(7 / (2 + xi)) to recover the damping percentage xi
        damping_percentage = 7 / spectr_red_fac ** 2 - 2
        key = f"Inelastic {str(round(damping_percentage))}% damped"
        project.project_specific['ADRS'][key] = {
            'displacement': demand_curve['sDen'], 'acceleration': demand_curve['sen']}
    if add_elastic:
        # NOTE(review): relies on 'demand_curve' from the last loop iteration; an empty
        # 'spectr_red_facs' list with add_elastic=True would raise a NameError (as in the original)
        project.project_specific['ADRS']['Elastic'] = {
            'displacement': demand_curve['sDe'], 'acceleration': demand_curve['se']}
    return
def _viia_plot_draft_po_curves(
        project: 'ViiaProject', subsystems: Optional[List[str]] = None, load_cases: Optional[List[str]] = None,
        steps: Optional[int] = None):
    """
    Function to define the governing subsystem for each load-case of subsystem NLPO.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - subsystems (list of str): Names of subsystems to process, default None (all subsystems in 'PO_data').
        - load_cases (list of str): Names of load-cases to process, default None (all load-cases in 'PO_data').
        - steps (int): Number of steps to be incorporated, default None (all steps of the first plotted curve).

    Output:
        - None: Plots are saved in the associated analysis sub-folder.
    """
    if not load_cases:
        load_cases = project.project_specific['PO_data']['load_cases']
    if not subsystems:
        subsystems = project.project_specific['PO_data']['subsystems']
    # Locate response spectrum; when missing it is (re)created once via _viia_store_demand_curves
    # NOTE(review): if the spectra exist but evaluate falsy, 'plot_adrs' is never assigned and the
    # reference below would raise a NameError — verify against intended usage.
    try:
        if project.project_specific['ADRS']['Elastic'] and project.project_specific['ADRS']['Inelastic 20% damped']:
            plot_adrs = True
    except KeyError:
        _viia_store_demand_curves(project)
        try:
            if project.project_specific['ADRS']['Elastic'] and \
                    project.project_specific['ADRS']['Inelastic 20% damped']:
                plot_adrs = True
        except:
            project.write_log(
                "WARNING: Unable to locate the response spectra.")
            plot_adrs = False
    # Set plot style
    style_file = Path(project.viia_settings.project_specific_package_location) / 'viiaGraph.mplstyle'
    plt.style.use(style_file.as_posix())
    for load_case in load_cases:
        fig = plt.figure(figsize=(20, 10))
        plt.xlim(0, 70)
        plt.xlabel('Displacement [mm]')
        plt.ylabel('Acceleration [g]')
        if plot_adrs:
            x = project.project_specific['ADRS']['Elastic'][
                'displacement'] * 1000  # meters into millimeters
            y = project.project_specific['ADRS']['Elastic']['acceleration']
            plt.plot(x, y, label='Elastic ADRS (Damping 5%)', linestyle='dashed', linewidth=1.7, color='purple')
            x = project.project_specific['ADRS']['Inelastic 20% damped'][
                'displacement'] * 1000  # meters into millimeters
            y = project.project_specific['ADRS']['Inelastic 20% damped']['acceleration']
            plt.plot(x, y, label='Inelastic ADRS (Damping 20%)', linestyle='dashed', linewidth=1.7, color='salmon')
        path = None
        for subsystem in subsystems:
            # Skip subsystems not assessed in the direction of this load-case
            if not project.project_specific['PO_data'][subsystem][f'assess_{load_case[0].lower()}']:
                continue
            if subsystem == 'global_system':
                # SDOF variant: one curve per storey (MDoF plot)
                plot = True
                for storey in project.project_specific['PO_data'][subsystem]['storeys']:
                    try:
                        disp_mm = project.project_specific['PO_data'][subsystem][load_case][storey][
                            'displacement'] * 1000
                        if fem_smaller(abs(max(disp_mm)), abs(min(disp_mm))):
                            disp_mm = -disp_mm
                        spectr_a = project.project_specific['PO_data'][subsystem][load_case]['spectral_acc']
                    except TypeError:
                        project.write_log(
                            f"WARNING: no data found for storey '{storey}' in load-case '{load_case}'. Unable to plot.")
                        plot = False
                        break
                    if plot:
                        # NOTE(review): 'steps' is set from the first curve and then reused for every
                        # following curve and load-case — confirm this freezing is intended
                        if not steps:
                            steps = len(disp_mm)
                        plt.plot(disp_mm[:steps], spectr_a[:steps], label=storey)
                try:
                    plt.title(f"PO-curve MDoF, {load_case}")
                    plt.legend(loc='upper right')
                    plt.savefig(
                        f"{str(project.project_specific['PO_data'][subsystem][load_case]['path'])}\\PO_curve_MDOF.png")
                except:
                    project.write_log(
                        f"WARNING: no data found for subsystem '{subsystem}' in load-case '{load_case}'. Unable to plot.")
            else:
                # Subsystem variant: one SDoF curve per subsystem
                try:
                    plot = True
                    disp_mm = project.project_specific['PO_data'][subsystem][load_case]['eff_displacement'] * 1000
                    if fem_smaller(abs(max(disp_mm)), abs(min(disp_mm))):
                        disp_mm = -disp_mm
                    spectr_a = project.project_specific['PO_data'][subsystem][load_case]['spectral_acc']
                    path = project.project_specific['PO_data'][subsystem][load_case]['path']
                except:
                    project.write_log(
                        f"WARNING: no data found for subsystem '{subsystem}' in load-case '{load_case}'. Unable to plot.")
                    plot = False
                if plot:
                    if not steps:
                        steps = len(disp_mm)
                    plt.plot(disp_mm[:steps], spectr_a[:steps], label=subsystem)
        # Combined SDoF figure only for the subsystem variant, and only when at least one curve was found
        if not subsystems == ['global_system'] and path:
            plt.title(f"PO-curves SDoF, {load_case}")
            plt.legend(loc='upper right')
            # Latest folder
            analysis_folder = Path(f"{project.workfolder_location}\\"
                                   f"{project.diana_settings.analysis_settings['A6']['folder_name']}"
                                   f"\\{load_case}")
            sub_folder = viia_get_latest_result_folder(project=project, folder=analysis_folder)
            plt.savefig(f"{sub_folder}\\PO_curve_SDOF_all_subsystems.png")
    return
def viia_get_drift_limits(project: 'ViiaProject', com_list_per_storey: List[float], category: str = 'Ductile'):
    """
    This function obtains the drift limits of a SDoF system equivalent according to the NPR for NLPO.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - com_list_per_storey (list): Centre-of-mass points per storey; the z-coordinate (index 2) of each
          point is used as the storey level.
        - category (str): Failure category, either 'Ductile' or 'Brittle', default 'Ductile'.

    Output:
        - Data is added to project.project_specific['drift_limits'].
    """
    # NOTE(review): some call sites pass 'load_cases'/'subsystems' style arguments to this function,
    # which does not match this signature — verify the intended interface.
    # Storey levels follow from the z-coordinates, sorted from low to high
    levels = sorted(point[2] for point in com_list_per_storey)
    # Drift factors for ductile or brittle failure
    factor = {'Ductile': 0.015, 'Brittle': 0.006}
    drift_limits = {}
    # Drift limit per storey: (inter-)storey height times the category factor, widened by a factor 1.2
    # so as to have a few more steps of the calculation available beyond the limit
    previous_level = 0
    for index, level in enumerate(levels):
        if category == 'Ductile':
            drift_limits[f'N{index}'] = (level - previous_level) * factor['Ductile'] * 1.2
        elif category == 'Brittle':
            drift_limits[f'N{index}'] = (level - previous_level) * factor['Brittle'] * 1.2
        elif index == 0:
            # Invalid category is only reported once, on the first storey (as before)
            project.write_log(
                'Select either Ductile or Brittle as the category')
        previous_level = level
    project.project_specific['drift_limits'] = drift_limits
    return
def _viia_plot_governing_line_per_load_case(project: 'ViiaProject', analysis: str = 'A6',
                                            steps: Optional[int] = None):
    """
    Plots the governing pushover lines as set in 'viia_set_governing_line_per_load_case', separately for 'x' and
    'y' direction. If available, the 5% and 20% damped ADRS, and the brittle and ductile drift limits are also
    plotted.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - analysis (str): The analysis number associated with the plotted data, e.g. 'A6'.
        - steps (int): The number of steps to be incorporated, default None (all steps of the first curve).

    Output:
        - None: Stores the plots in the 'ADRS' sub-folder.
    """
    # Locate response spectrum; when missing it is (re)created once via _viia_store_demand_curves
    try:
        if project.project_specific['ADRS']['Elastic'] and project.project_specific['ADRS'][
                'Inelastic 20% damped']:
            plot_adrs = True
    except KeyError:
        _viia_store_demand_curves(project)
        try:
            if project.project_specific['ADRS']['Elastic'] and project.project_specific['ADRS'][
                    'Inelastic 20% damped']:
                plot_adrs = True
        except:
            project.write_log(
                "ERROR: Unable to locate the inelastic response spectra")
            plot_adrs = False
    # Set plot style
    style_file = Path(project.viia_settings.project_specific_package_location) / 'viiaGraph.mplstyle'
    plt.style.use(style_file.as_posix())
    load_cases = project.project_specific['PO_data']['load_cases']
    for direction in ['x', 'y']:
        fig = plt.figure(figsize=(20, 10))
        plt.xlim(0, 70)
        plt.xlabel('Displacement [mm]')
        plt.ylabel('Acceleration [g]')
        if plot_adrs:
            x = project.project_specific['ADRS']['Elastic'][
                'displacement'] * 1000  # meters into millimeters
            y = project.project_specific['ADRS']['Elastic']['acceleration']
            plt.plot(x, y, label='Elastic ADRS (Damping 5%)', linestyle='dashed', linewidth=1.7, color='purple')
            x = project.project_specific['ADRS']['Inelastic 20% damped'][
                'displacement'] * 1000  # meters into millimeters
            y = project.project_specific['ADRS']['Inelastic 20% damped']['acceleration']
            plt.plot(x, y, label='Inelastic ADRS (Damping 20%)', linestyle='dashed', linewidth=1.7, color='salmon')
        # Track the minimum brittle/ductile drift limit over the plotted load-cases
        drift_limit_br = None
        drift_limit_dc = None
        for load_case in load_cases:
            if load_case[0].lower() == direction:
                # SDOF models only have the global system; otherwise take the governing line for this load-case
                if project.project_specific['PO_data']['subsystems'] == ['global_system']:
                    subsystem = 'global_system'
                else:
                    subsystem = project.project_specific['PO_data']['Governing lines per load case'][load_case]
                try:
                    plot = True
                    disp_mm = project.project_specific['PO_data'][subsystem][load_case]['eff_displacement'] * 1000
                    if fem_smaller(abs(max(disp_mm)), abs(min(disp_mm))):
                        disp_mm = -disp_mm
                    spectr_a = project.project_specific['PO_data'][subsystem][load_case]['spectral_acc']
                    if drift_limit_br:
                        drift_limit_br = min(
                            drift_limit_br,
                            project.project_specific['PO_data'][subsystem][load_case]['drift_limits'][
                                'brittle']['minimum'])
                    else:
                        drift_limit_br = \
                            project.project_specific['PO_data'][subsystem][load_case]['drift_limits'][
                                'brittle']['minimum']
                    if drift_limit_dc:
                        drift_limit_dc = min(
                            drift_limit_dc,
                            project.project_specific['PO_data'][subsystem][load_case]['drift_limits'][
                                'ductile']['minimum'])
                    else:
                        drift_limit_dc = \
                            project.project_specific['PO_data'][subsystem][load_case]['drift_limits'][
                                'ductile']['minimum']
                except:
                    project.write_log(
                        f"WARNING: no data found for subsystem '{subsystem}' in load-case '{load_case}'. "
                        f"Unable to plot.")
                    # Bug fix: was 'plot = Falser', which raised a NameError inside this except clause
                    plot = False
                if plot:
                    if not steps:
                        steps = len(disp_mm)
                    plt.plot(disp_mm[:steps], spectr_a[:steps], label=f"{subsystem}, {load_case}")
        if drift_limit_br:
            plt.axvline(x=drift_limit_br * 1000, label='Minimum brittle drift limit', linestyle='dotted',
                        linewidth=1.7, color='lime')  # in [mm]
        if drift_limit_dc:
            plt.axvline(x=drift_limit_dc * 1000, label='Minimum ductile drift limit', linestyle='dotted',
                        linewidth=1.7, color='orangered')  # in [mm]
        plt.title(f"Governing pushover curves, {direction}-direction")
        plt.legend(loc='upper right')
        fem_create_folder(f"{project.diana_settings.analysis_settings[analysis]['folder_name']}\\ADRS")
        plt.savefig(
            f"{str(Path.cwd())}\\{project.diana_settings.analysis_settings[analysis]['folder_name']}"
            f"\\ADRS\\governing_po_{direction}.png")
    return
def _viia_bilinearize_po_curve(project: 'ViiaProject', combinations: Optional[List[dict]] = None,
                               autofill: Optional[bool] = True, _iteration: bool = False):
    """
    Bilinearizes pushover curves of SDoF equivalent systems. This function is an antecedent to
    'viia_get_drift_limits()'.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - combinations: list of dictionaries with keys 'subsystem', 'load_case' and 'failure_mechanism',
          respectively containing the name of a subsystem (e.g. 'global_system' or 'line_of_y_8.3'), the name
          of the load-case (e.g. 'X_pos_uni' or 'Y_neg_tri'), and the type of failure mechanism (either
          'brittle' or 'ductile'). If not inputted, the function will consider either all available pushover
          lines (when 'all lines' is present in the PO-data) or the governing pushover lines for 'x' and 'y'
          direction, which are to be set using 'viia_set_governing_lines_per_direction(...)'.
        - autofill: whether to call precedential functions ('viia_get_drift_limits') when preparatory data is
          missing.
        - _iteration: option designed purely for use in 'viia_assess_po_curve'; when True only the system
          ductility factor is re-evaluated from previously stored bilinearization results. It is strictly
          recommended to use the default input argument.

    Output:
        - None: data is added to project.project_specific['PO_data']
    """
    if not combinations:
        if 'all lines' in project.project_specific['PO_data']:
            # Build the default combination list from every subsystem x failure-mechanism x load-case.
            # NOTE(review): subsystems containing the token 'y' are paired with 'X' load-cases (and vice
            # versa) — presumably lines along y resist loading in x; confirm this pairing convention.
            comb = 0  # flag: the 'combinations' list has been (re)initialised
            load_cases = project.project_specific['PO_data']['load_cases']
            subsystems = project.project_specific['PO_data']['subsystems']
            failure_mechs = project.project_specific['PO_data']['all lines']
            for subsystem in subsystems:
                for failure_mech in failure_mechs:
                    for load_case in load_cases:
                        if 'y' in subsystem.split('_'):
                            if 'X' in load_case.split('_'):
                                if comb == 0:
                                    # Replace the (empty) input argument on the first match
                                    combinations = []
                                    comb = 1
                                combinations.append({
                                    'subsystem': subsystem,
                                    'load_case': load_case,
                                    'failure_mechanism': failure_mech})
                            else:
                                continue
                        elif 'x' in subsystem.split('_'):
                            if 'Y' in load_case.split('_'):
                                if comb == 0:
                                    combinations = []
                                    comb = 1
                                combinations.append({
                                    'subsystem': subsystem,
                                    'load_case': load_case,
                                    'failure_mechanism': failure_mech})
                            else:
                                continue
        else:
            combinations = [
                project.project_specific['PO_data']['Governing lines per direction']['X'],
                project.project_specific['PO_data']['Governing lines per direction']['Y']]
    for combination in combinations:
        subsystem = combination['subsystem']
        load_case = combination['load_case']
        failure_mech = combination['failure_mechanism']
        data_found = False
        autofill_aux = autofill
        # Collect the required input data; on failure, run the autofill chain once and retry
        while True:
            if _iteration == False:
                try:
                    # Locate stored input data
                    spectr_a = project.project_specific['PO_data'][subsystem][load_case]['spectral_acc']
                    disp_star = project.project_specific['PO_data'][subsystem][load_case]['eff_displacement']
                    mass_eff = project.project_specific['PO_data'][subsystem][load_case]['effective_mass']
                    drift_limit = \
                        project.project_specific['PO_data'][subsystem][load_case]['drift_limits'][failure_mech][
                            'minimum']
                    data_found = True
                    break
                except:
                    if autofill and autofill_aux:
                        viia_get_drift_limits(project, [subsystem], [load_case], True)
                        autofill_aux = False
                        continue
                    else:
                        project.write_log(
                            f"ERROR: required input data for 'viia_bilinearize_po_curve()'"
                            f" could not be found for subsystem '{subsystem}' in load-case '{load_case}'.")
                        break
            elif _iteration == True:
                try:
                    # Locate stored input data (results of an earlier non-iteration run)
                    disp_cap_sys = project.project_specific['PO_data'][subsystem][load_case]['disp_cap_sys']
                    disp_y_sys = project.project_specific['PO_data'][subsystem][load_case]['disp_y_sys']
                    ductility_factor = project.project_specific['PO_data'][subsystem][load_case]['mu_sys_iterated']
                    drift_limit = \
                        project.project_specific['PO_data'][subsystem][load_case]['drift_limits'][failure_mech][
                            'minimum']
                    disp_int = project.project_specific['PO_data'][subsystem][load_case]['disp_intersect']
                    data_found = True
                    break
                except:
                    if autofill and autofill_aux:
                        viia_get_drift_limits(project, [subsystem], [load_case], True)
                        autofill_aux = False
                        continue
                    else:
                        project.write_log(
                            f"ERROR: required input data for 'viia_bilinearize_po_curve()'"
                            f" could not be found for subsystem '{subsystem}' in load-case '{load_case}'.")
                        break
        if not data_found:
            continue
        if _iteration == False:
            # Truncate the capacity curve at the (minimum) drift limit, if it is exceeded
            drift_idx = np.argwhere(np.diff(np.sign(drift_limit - disp_star))).flatten()
            if len(drift_idx) > 0:
                disp_star = disp_star[:drift_idx[0]]
                spectr_a = spectr_a[:drift_idx[0]]
            spectr_a_80pmax = 0.8 * max(spectr_a)  # [%g]
            spectr_a_60pmax = 0.6 * max(spectr_a)  # [%g]
            spectr_a_50pmax = 0.5 * max(spectr_a)  # [%g]
            # Displacement where the curve first crosses 60% of the peak (used for the initial stiffness)
            disp_60pmax_idx = np.argwhere(
                np.diff(np.sign(spectr_a_60pmax - spectr_a))).flatten()  # Is this sufficiently accurate?
            disp_60pmax = disp_star[disp_60pmax_idx[0]]  # [m]
            # Find the displacement at 50% of the maximum base shear
            disp_cap_sys_idx = np.argwhere(
                np.diff(np.sign(spectr_a_50pmax - spectr_a))).flatten()  # Is this sufficiently accurate?
            if len(disp_cap_sys_idx) > 1:
                # Second crossing = post-peak decay to 50%; bound it by the drift limit
                disp_cap_sys = disp_star[disp_cap_sys_idx[1]]  # [m]
                if fem_smaller(disp_cap_sys, drift_limit):
                    disp_cap_sys_idx = disp_cap_sys_idx[1]  # [m]
                else:
                    project.write_log(
                        "WARNING: the deformation capacity has been bounded to the drift limit")
                    disp_cap_sys = drift_limit
                    disp_cap_sys_idx = (np.argwhere(np.diff(np.sign(disp_cap_sys - disp_star))).flatten())[0]
            else:
                # No post-peak crossing: take the last point of the curve, bounded by the drift limit
                disp_cap_sys = disp_star[-1]
                if fem_smaller(disp_cap_sys, drift_limit):
                    disp_cap_sys_idx = len(disp_star) - 1
                else:
                    project.write_log(
                        "WARNING: the deformation capacity has been bounded to the drift limit")
                    disp_cap_sys = drift_limit
                    disp_cap_sys_idx = (np.argwhere(np.diff(np.sign(disp_cap_sys - disp_star))).flatten())[0]
            # Bilinearize the pushover curve (capacity at 80% of the peak, post-peak)
            disp_cap_bilin_idx = np.argwhere(
                np.diff(np.sign(spectr_a_80pmax - spectr_a))).flatten()  # Is this sufficiently accurate?
            if len(disp_cap_bilin_idx) > 1:
                disp_cap_bilin = disp_star[disp_cap_bilin_idx[1]]  # [m]
                if fem_smaller(disp_cap_bilin, drift_limit):
                    disp_cap_bilin_idx = disp_cap_bilin_idx[1]  # [m]
                else:
                    project.write_log(
                        "WARNING: the deformation capacity has been bounded to the drift limit")
                    disp_cap_bilin = drift_limit
                    disp_cap_bilin_idx = (np.argwhere(np.diff(np.sign(disp_cap_bilin - disp_star))).flatten())[0]
            else:
                disp_cap_bilin = disp_star[-1]
                if fem_smaller(disp_cap_bilin, drift_limit):
                    disp_cap_bilin_idx = len(disp_star) - 1
                else:
                    project.write_log(
                        "WARNING: the deformation capacity has been bounded to the drift limit")
                    disp_cap_bilin = drift_limit
                    disp_cap_bilin_idx = (np.argwhere(np.diff(np.sign(disp_cap_bilin - disp_star))).flatten())[0]
            # Energy under the capacity curve up to the bilinear capacity point
            # NOTE(review): 'cumtrapz' is deprecated in modern SciPy in favour of 'cumulative_trapezoid'
            from scipy import integrate
            area = integrate.cumtrapz(spectr_a[0:disp_cap_bilin_idx], disp_star[0:disp_cap_bilin_idx], initial=0)[
                -1]  # [N g]
            energy_m = area * mass_eff * project.gravitational_acceleration  # [Nm]
            # Initial stiffness from the 60%-of-peak point
            k_init = spectr_a_60pmax * mass_eff * project.gravitational_acceleration / disp_60pmax  # [N/m]
            ## Calculating the effective secant eigenvalue
            secant_eigen = math.sqrt(k_init / mass_eff)
            project.project_specific['PO_data'][subsystem][load_case]['secant_eigen'] = secant_eigen  # [Hz]
            # Yield acceleration from the equal-energy principle
            spectr_a_y = (disp_cap_bilin * k_init - np.sqrt((disp_cap_bilin * k_init) ** 2 - 2 * energy_m * k_init)) / (
                mass_eff * project.gravitational_acceleration)  # [N/kg] converted to [%g]
            if fem_smaller(spectr_a_y, 0.1) or fem_greater(spectr_a_y, 5):
                project.write_log(
                    f"WARNING: the value of 'spectr_a_y'' is expected to be in the "
                    f"range of 0.1-5. This is not the case for '{subsystem}' with {load_case}. Please check input data.")
            disp_y_sys = spectr_a_y * mass_eff / k_init  # [m]
            # System ductility factor, clipped to the range [1, 4]
            ductility_factor = max(1, min(disp_cap_bilin / disp_y_sys, 4))
            # Construct the bilinear curve: elastic branch followed by the constant yield plateau
            spectr_a_y = spectr_a_y * np.ones(disp_cap_sys_idx)  # [%g]
            spectr_init = disp_star[:disp_cap_sys_idx] * k_init / (
                mass_eff * project.gravitational_acceleration)  # [%g]
            try:
                disp_a_y_idx = int(np.argwhere(np.diff(np.sign(spectr_init - spectr_a_y))).flatten()[0])
            except:
                # No crossing between elastic branch and plateau: plateau starts at the last sample
                disp_a_y_idx = len(spectr_a_y) - 1
            spectr_a_bilin = np.append(spectr_init[0:disp_a_y_idx], spectr_a_y[(disp_a_y_idx):])  # [%g]
            disp_star_bilin = disp_star[0:len(spectr_a_bilin)]
        elif _iteration == True:
            # Iteration mode: only re-evaluate the ductility factor, bounded by the ADRS intersection
            if disp_int:
                disp_duct_sys = min(disp_cap_sys, disp_int)
            else:
                disp_duct_sys = disp_cap_sys
            ductility_factor = max(1, min(disp_duct_sys / disp_y_sys, 4))
        if _iteration == False:
            project.project_specific['PO_data'][subsystem][load_case]['disp_cap_sys'] = disp_cap_sys
            project.project_specific['PO_data'][subsystem][load_case]['disp_y_sys'] = disp_y_sys
            project.project_specific['PO_data'][subsystem][load_case]['spectr_disp_bilin'] = disp_star_bilin
            project.project_specific['PO_data'][subsystem][load_case]['spectr_acc_bilin'] = spectr_a_bilin
            project.project_specific['PO_data'][subsystem][load_case]['mu_sys_initial'] = ductility_factor
        elif _iteration == True:
            project.project_specific['PO_data'][subsystem][load_case]['mu_sys_iterated'] = ductility_factor
    return
def _viia_assess_po_curve(
        project: 'ViiaProject', combinations: Optional[List[dict]] = None, autofill: Optional[bool] = True,
        tolerance: float = 0.1):
    """
    This function assesses the requested pushover lines with the corresponding ADRS. The system ductility
    factor is iterated (capacity bilinearization <-> damped demand spectrum) until two successive estimates
    differ by less than 'tolerance', after which the %NBS compliance is evaluated at the performance point.
    This function is an antecedent to 'viia_bilinearize_po_curve()'.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - combinations: list of dictionaries with keys 'subsystem', 'load_case' and 'failure_mechanism',
          respectively containing the name of a subsystem (e.g. 'global_system' or 'line_of_y_8.3'), the name
          of the load-case (e.g. 'X_pos_uni' or 'Y_neg_tri'), and the type of failure mechanism (either
          'brittle' or 'ductile'). If not inputted, the function will consider the governing pushover lines
          for 'x' and 'y' direction, which are to be set using 'viia_set_governing_lines_per_direction(...)'.
        - autofill: whether or not to call precedential functions when preparatory data is missing.
        - tolerance: tolerance associated with the change in iterated result value of the system ductility
          factor.

    Output:
        - None: data is added to project.project_specific['PO_data']
    """
    if not combinations:
        if 'all lines' in project.project_specific['PO_data']:
            # Build the combination list from all subsystems x failure mechanisms x load-cases
            # (starting from an empty list so that zero matches yields an empty loop, not a crash)
            combinations = []
            load_cases = project.project_specific['PO_data']['load_cases']
            subsystems = project.project_specific['PO_data']['subsystems']
            failure_mechs = project.project_specific['PO_data']['all lines']
            for subsystem in subsystems:
                for failure_mech in failure_mechs:
                    for load_case in load_cases:
                        # Lines with token 'y' are assessed under 'X' load-cases and vice versa
                        if 'y' in subsystem.split('_'):
                            if 'X' in load_case.split('_'):
                                combinations.append({'subsystem': subsystem, 'load_case': load_case,
                                                     'failure_mechanism': failure_mech})
                        elif 'x' in subsystem.split('_'):
                            if 'Y' in load_case.split('_'):
                                combinations.append({'subsystem': subsystem, 'load_case': load_case,
                                                     'failure_mechanism': failure_mech})
        else:
            combinations = [project.project_specific['PO_data']['Governing lines per direction']['X'],
                            project.project_specific['PO_data']['Governing lines per direction']['Y']]
    for combination in combinations:
        subsystem = combination['subsystem']
        load_case = combination['load_case']
        data_found = False
        autofill_aux = autofill
        # Fetch the bilinearized capacity curve; optionally create it first via autofill and retry once
        while True:
            try:
                # Locate stored input data
                disp_cap_sys = project.project_specific['PO_data'][subsystem][load_case]['disp_cap_sys']
                spectr_disp_bilin = project.project_specific['PO_data'][subsystem][load_case][
                    'spectr_disp_bilin']
                spectr_a_bilin = project.project_specific['PO_data'][subsystem][load_case]['spectr_acc_bilin']
                system_ductility_factor_assumed = project.project_specific['PO_data'][subsystem][load_case][
                    'mu_sys_initial']
                data_found = True
                break
            except Exception:
                if autofill and autofill_aux:
                    _viia_bilinearize_po_curve(project, [combination], True, False)
                    autofill_aux = False
                    continue
                else:
                    project.write_log(
                        f"ERROR: required input data for 'viia_assess_po_curve()'"
                        f" could not be found for subsystem '{subsystem}' in load-case '{load_case}'.")
                    break
        if not data_found:
            continue
        # Freeze copies of the bilinear curve and seed the iterated ductility with the initial estimate
        project.project_specific['PO_data'][subsystem][load_case]['spectr_disp_bilin'] = deepcopy(
            spectr_disp_bilin)
        project.project_specific['PO_data'][subsystem][load_case]['spectr_acc_bilin'] = deepcopy(spectr_a_bilin)
        project.project_specific['PO_data'][subsystem][load_case]['mu_sys_iterated'] = deepcopy(
            system_ductility_factor_assumed)
        disp_int = project.project_specific['PO_data'][subsystem][load_case]['disp_intersect'] = None
        system_ductility_factor_found = None
        system_ductility_factor_assumed = None
        # Iterate until the system ductility factor is converged within the tolerance.
        # NOTE(review): there is no iteration cap; a non-converging case loops indefinitely.
        while system_ductility_factor_assumed is None or abs(
                system_ductility_factor_assumed - system_ductility_factor_found) > tolerance:
            system_ductility_factor_assumed = deepcopy(system_ductility_factor_found)
            _compute_damping(project, [combination], autofill)
            spectr_red_fac = project.project_specific['PO_data'][subsystem][load_case]['spectr_red_factor']
            response_spectrum, _ = _viia_demand_curve(project, spectr_red_fac)
            system_ductility_factor_found = project.project_specific['PO_data'][subsystem][load_case][
                'mu_sys_iterated']
            # Interpolate the PO-curve displacement samples onto the ADRS displacement grid
            spectr_acc_bilin_aux = np.interp(response_spectrum['sDen'], spectr_disp_bilin, spectr_a_bilin)
            # Find the intersection between capacity and demand curves (performance point)
            disp_int_idx = np.argwhere(np.diff(np.sign(spectr_acc_bilin_aux - response_spectrum['sen']))).flatten()
            try:
                disp_int = response_spectrum['sDen'][disp_int_idx[0]]
            except IndexError:
                disp_int = None
            # Store under both keys: 'disp_intersect' is the key read back by
            # '_viia_bilinearize_po_curve(..., _iteration=True)'. Previously only 'disp_int' was written,
            # so the iterated ductility factor never saw the intersection (bug fix).
            project.project_specific['PO_data'][subsystem][load_case]['disp_int'] = disp_int
            project.project_specific['PO_data'][subsystem][load_case]['disp_intersect'] = disp_int
            _viia_bilinearize_po_curve(project, [combination], autofill, True)
        # Compliance check: %NBS = deformation capacity / demand at the performance point
        if disp_int is not None:
            NBS = disp_cap_sys / disp_int
            if NBS < 1:
                project.write_log(
                    f"The calculated %NBS value of {NBS} is not compliant.")
                project.project_specific['PO_data'][subsystem][load_case]['compliance'] = False
            else:
                # NBS >= 1 is compliant (previously NBS == 1 left 'compliance' unset)
                project.write_log(
                    f"The calculated %NBS value of {NBS} is compliant.")
                project.project_specific['PO_data'][subsystem][load_case]['compliance'] = True
        else:
            project.write_log(
                "No intersection found between bilinear capacity curve and inelastic demand curve. No compliance.")
            project.project_specific['PO_data'][subsystem][load_case]['compliance'] = False
    return
def _viia_plot_governing_line_per_direction(project: 'ViiaProject', analysis='A6', steps: Optional[int] = None):
    """
    Plots the governing bilinearized pushover lines as set in 'viia_set_governing_line_per_direction',
    separately for 'x' and 'y' direction. If available, the 5% and 20% damped ADRS, and the brittle and
    ductile drift limits are also plotted.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - analysis: the analysis number associated with the plotted data, e.g. 'A6'.
        - steps: the number of steps to be incorporated; defaults to the full length of the first curve.

    Output:
        - None: stores the plots in the 'ADRS' subfolder of the analysis folder.
    """
    # Locate response spectra. 'plot_adrs' defaults to False so that a missing or falsy 'ADRS' entry can
    # never leave the flag unbound (previously a NameError risk at 'if plot_adrs:').
    plot_adrs = False
    try:
        if project.project_specific['ADRS']['Elastic'] and project.project_specific['ADRS'][
                'Inelastic 20% damped']:
            plot_adrs = True
    except KeyError:
        # ADRS data not stored yet: generate it once and retry
        _viia_store_demand_curves(project)
        try:
            if project.project_specific['ADRS']['Elastic'] and project.project_specific['ADRS'][
                    'Inelastic 20% damped']:
                plot_adrs = True
        except Exception:
            project.write_log(
                "ERROR: Unable to locate the inelastic response spectra")
            plot_adrs = False
    # Set plot style
    style_file = Path(project.viia_settings.project_specific_package_location) / 'viiaGraph.mplstyle'
    plt.style.use(style_file.as_posix())

    def _plot_bilinear_line(subsystem, load_case):
        # Plot one bilinearized capacity curve (shared by all branches below); remembers the step count
        # of the first plotted curve so subsequent curves are truncated consistently.
        nonlocal steps
        try:
            disp_mm = project.project_specific['PO_data'][subsystem][load_case]['spectr_disp_bilin'] * 1000
            if fem_smaller(abs(max(disp_mm)), abs(min(disp_mm))):
                # Negative-direction pushover: mirror so the curve plots on the positive axis
                disp_mm = -disp_mm
            spectr_a = project.project_specific['PO_data'][subsystem][load_case]['spectr_acc_bilin']
            if not steps:
                steps = len(disp_mm)
            plt.plot(disp_mm[:steps], spectr_a[:steps], label=f"{subsystem}, {load_case}")
        except TypeError:
            project.write_log(
                f"WARNING: no data found for subsystem '{subsystem}' in load-case '{load_case}'. Unable to plot.")

    for direction in ['x', 'y']:
        plt.figure(figsize=(20, 10))
        plt.xlim(0, 70)
        plt.xlabel('Displacement [mm]')
        plt.ylabel('Acceleration [g]')
        if plot_adrs:
            x = project.project_specific['ADRS']['Elastic']['displacement'] * 1000  # meters into millimeters
            y = project.project_specific['ADRS']['Elastic']['acceleration']
            plt.plot(x, y, label='Elastic ADRS (Damping 5%)', linestyle='dashed', linewidth=1.7, color='purple')
            x = project.project_specific['ADRS']['Inelastic 20% damped'][
                'displacement'] * 1000  # meters into millimeters
            y = project.project_specific['ADRS']['Inelastic 20% damped']['acceleration']
            plt.plot(x, y, label='Inelastic ADRS (Damping 20%)', linestyle='dashed', linewidth=1.7,
                     color='salmon')
        if 'all lines' in project.project_specific['PO_data']:
            load_cases = project.project_specific['PO_data']['load_cases']
            subsystems = project.project_specific['PO_data']['subsystems']
            for subsystem in subsystems:
                for load_case in load_cases:
                    if 'governing load-case' in project.project_specific['PO_data']:
                        # Only plot load-cases matching the governing load-case label
                        if project.project_specific['PO_data']['governing load-case'][0] not in load_case:
                            continue
                    # Lines with token 'y' are plotted in the y-direction figure under 'X' load-cases,
                    # and vice versa
                    if 'y' in subsystem.split('_') and 'y' in direction and 'X' in load_case.split('_'):
                        _plot_bilinear_line(subsystem, load_case)
                    if 'x' in subsystem.split('_') and 'x' in direction and 'Y' in load_case.split('_'):
                        _plot_bilinear_line(subsystem, load_case)
        else:
            load_case = project.project_specific['PO_data']['Governing lines per direction'][direction.upper()][
                'load_case']
            subsystem = project.project_specific['PO_data']['Governing lines per direction'][direction.upper()][
                'subsystem']
            _plot_bilinear_line(subsystem, load_case)
        # Drift limits of the last handled subsystem/load-case, if available
        try:
            drift_limit_br = project.project_specific['PO_data'][subsystem][load_case]['drift_limits']['brittle'][
                'minimum']
            plt.axvline(x=drift_limit_br * 1000, label='Minimum brittle drift limit', linestyle='dotted',
                        linewidth=1.7, color='lime')  # in [mm]
        except Exception:
            project.write_log(
                f"WARNING: no brittle drift limit found for subsystem '{subsystem}' in load-case '{load_case}'. Unable to plot.")
        try:
            drift_limit_dc = project.project_specific['PO_data'][subsystem][load_case]['drift_limits']['ductile'][
                'minimum']
            plt.axvline(x=drift_limit_dc * 1000, label='Minimum ductile drift limit', linestyle='dotted',
                        linewidth=1.7, color='orangered')  # in [mm]
        except Exception:
            project.write_log(
                f"WARNING: no ductile drift limit found for subsystem '{subsystem}' in load-case '{load_case}'. Unable to plot.")
        plt.title(f"Governing bilinearized pushover curve, {direction}-direction")
        plt.legend(loc='upper right')
        fem_create_folder(f"{project.diana_settings.analysis_settings[analysis]['folder_name']}\\ADRS")
        plt.savefig(
            f"{str(Path.cwd())}\\{project.diana_settings.analysis_settings[analysis]['folder_name']}\\ADRS\\bilinear_po_{direction}.png")
    return
def _get_storey_height(project: 'ViiaProject', subsystems: Optional[List[str]] = None):
    """
    Function that finds the number of storeys and the height of each storey (per subsystem).

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - subsystems: list with names of subsystems to process; defaults to all subsystems in the PO-data.

    Output:
        - list of storey heights [m] (bottom storey first), or None when a floor level cannot be detected
        - number of storeys
        - data is stored in project.project_specific['PO_data']
    """
    if not subsystems:
        subsystems = project.project_specific['PO_data']['subsystems']
    # TODO: adjust for the true floors in a subsytems
    # TODO: integrate this part in the setup of the main dictionary
    for subsystem in subsystems:
        # Find the number of storeys of the building from floor names 'N<k>-...'
        storey_numbers = []
        for floor in project.collections.floors:
            if floor not in project.collections.roofs and floor.name[0] == 'N':
                storey_numbers.append(int(floor.name.split('N')[1].split('-')[0]))
        n_storeys = max(storey_numbers)
        # Collect the floor levels, top storey first, inserting at the front so the list ends up
        # ordered bottom (N0) to top (N<n_storeys>)
        height_per_storey = []
        for j in range(0, n_storeys + 1):
            z_levels = [floor.contour.get_z() for floor in project.collections.floors if
                        floor not in project.collections.roofs and floor.contour.is_horizontal() and
                        floor.name[0] == 'N' and int(floor.name.split('N')[1].split('-')[0]) == n_storeys - j]
            if z_levels:
                height_per_storey.insert(0, max(z_levels))  # in [m]
            elif f'N{n_storeys - j}' in project.project_specific['storey_levels'] and \
                    project.project_specific['storey_levels'][f'N{n_storeys - j}'] is not None:
                # Fall back on a manually specified storey level
                height_per_storey.insert(0, project.project_specific['storey_levels'][f'N{n_storeys - j}'])
                project.write_log(
                    f"WARNING: manual storey level detected and used from "
                    f"'project.project_specific['storey_levels']['N{n_storeys - j}']'")
            else:
                project.write_log(
                    f"ERROR: no storey level can be detected for N{n_storeys - j}, "
                    f"please define the floor level manually in 'project.project_specific['storey_levels']"
                    f"['N{n_storeys - j}']' as a float.")
                return None, n_storeys
        # Storey heights are the successive differences of the ORIGINAL floor levels. The previous
        # implementation differenced the list in place through an alias, so for three or more levels it
        # subtracted already-overwritten entries and produced wrong heights (bug fix).
        storey_height = [height_per_storey[j] - height_per_storey[j - 1]
                         for j in range(1, len(height_per_storey))]
        project.project_specific['PO_data'][subsystem]['nr_storeys'] = n_storeys
        for n, storey in enumerate(project.project_specific['PO_data'][subsystem]['storeys']):
            project.project_specific['PO_data'][subsystem]['storey_height'][storey] = storey_height[n]
    return storey_height, n_storeys
def _viia_demand_curve(project: 'ViiaProject', spectr_red_fac=1., return_period=2475, steps=100):
    """
    This function creates the seismic demand curve for pushover analysis. The corresponding theory and
    procedure can be found in NLPO protocol assessment section 3. If the address of the object is set
    priorly, the function is able to collect preparatory data from the NEN webtool.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - spectr_red_fac (float): spectral reduction factor associated with the system damping.
        - return_period (int): the prescribed return period associated with the demand spectrum in years.
        - steps (int): discrete points that control the smoothness of the figure.

    Output:
        - dictionary that contains the (in)elastic displacement/acceleration response spectrum values, and a
          dictionary with associated spectrum parameters; (None, None) when no matching response spectrum is
          found.

    Attribute:
        - viia_demand_curve.get_value_ADR returns a dict containing the (in)elastic disp/acceleration
          spectrum value on period t
          >> data = viia_demand_curve(response_spectrum_file)
          >> data_at_random_time = viia_demand_curve.get_value_ADR(t=0.5)
          >> data_at_random_time =
          {'se': 0.521809934, 'sDe': 32.38312009139122, 'sen': 0.29434049937163764, 'sDen': 18.266543271504304}
    """
    # Arguments handling
    return_period = str(return_period)
    # Select the response spectrum matching the requested return period
    for spectrum in project.collections.response_spectra:
        if f"NPR9998:2018-{return_period}" in spectrum.name:
            break
    else:
        spectrum = None
    if not spectrum:
        project.write_log("ERROR: no response spectrum was found for the specified return period")
        # Bug fix: previously execution fell through (a bare 'pass') and crashed on attribute access of None
        return None, None
    ag_s = spectrum.peak_ground_acceleration
    ag_d = ag_s * project.importance_factor
    t_b = spectrum.t_b
    t_c = spectrum.t_c
    t_d = spectrum.t_d
    rho = spectrum.plateau_factor

    # Calculate required spectrum value given a period
    def get_value_ADR(project: 'ViiaProject', t):
        # t_b: lower limit of the period of the constant spectral acceleration branch
        if t < t_b:
            se = ag_d * (1 + t / t_b * (rho - 1))
            sen = ag_d * (1 + t / t_b * (spectr_red_fac * rho - 1))
        # t_c: upper limit of the period of the constant spectral acceleration branch
        if t_b <= t < t_c:
            se = ag_d * rho
            sen = ag_d * rho * spectr_red_fac
        # t_d: value defining the start of the constant displacement response spectrum
        if t_c <= t < t_d:
            se = ag_d * rho * t_c / t
            sen = ag_d * rho * t_c / t * spectr_red_fac
        if t >= t_d:
            se = ag_d * rho * t_c * t_d / t ** 2
            sen = ag_d * rho * t_c * t_d / t ** 2 * spectr_red_fac
        # Elastic displacement response spectrum
        sDe = se * (t / (2 * math.pi)) ** 2 * project.gravitational_acceleration
        # Inelastic displacement response spectrum
        sDen = sen * (t / (2 * math.pi)) ** 2 * project.gravitational_acceleration
        return {'se': se, 'sDe': sDe, 'sen': sen, 'sDen': sDen}

    # Create attribute, in case this function is used stand-alone
    _viia_demand_curve.get_value_ADR = get_value_ADR
    # The range of period is 0-2 [s]
    t = np.linspace(0, 2, steps)
    # Convert from list of dict-values to array (columns: se, sDe, sen, sDen)
    result = np.array([list(get_value_ADR(project, value).values()) for value in t])
    project.project_specific['parameters']['ag_s'] = ag_s
    project.project_specific['parameters']['ag_d'] = ag_d
    project.project_specific['parameters']['p'] = rho
    project.project_specific['parameters']['t_b'] = t_b
    project.project_specific['parameters']['t_c'] = t_c
    project.project_specific['parameters']['t_d'] = t_d
    project.project_specific['parameters']['Jaar'] = return_period
    # Collect results
    return {'se': result[:, 0], 'sDe': result[:, 1], 'sen': result[:, 2], 'sDen': result[:, 3]}, {
        'parameters': {'ag_s': ag_s, 'ag_d': ag_d, 'p': rho, 't_b': t_b, 't_c': t_c, 't_d': t_d,
                       'Jaar': return_period}}
def _compute_damping(project: 'ViiaProject', combinations: Optional[List[dict]] = None,
                     autofill: Optional[bool] = False, soil_damping=False):
    """
    This function computes the spectral reduction factor based on given characteristics of the structure.
    It is designed for nested use in 'viia_assess_po_curve()'. Stand-alone use is not recommended.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - combinations: list of dictionaries with keys 'subsystem', 'load_case' and 'failure_mechanism',
          respectively containing the name of a subsystem (e.g. 'global_system' or 'line_of_y_8.3'), the name
          of the load-case (e.g. 'X_pos_uni' or 'Y_neg_tri'), and the type of failure mechanism (either
          'brittle' or 'ductile'). If not inputted, the function will consider the governing pushover lines
          for 'x' and 'y' direction, which are to be set using 'viia_set_governing_lines_per_direction(...)'.
        - autofill: whether or not to call precedential functions when preparatory data is missing.
        - soil_damping: whether to take into account soil radiation damping. This is currently not supported.

    Output:
        - None: data is added to project.project_specific['PO_data']
    """
    if not combinations:
        combinations = [project.project_specific['PO_data']['Governing lines per direction']['X'],
                        project.project_specific['PO_data']['Governing lines per direction']['Y']]
    for combination in combinations:
        subsystem = combination['subsystem']
        load_case = combination['load_case']
        failure_mech = combination['failure_mechanism']
        data_found = False
        autofill_aux = autofill
        # Fetch the iterated ductility factor; optionally create it first via autofill and retry once
        while True:
            try:
                # Locate stored input data
                system_ductility_factor = project.project_specific['PO_data'][subsystem][load_case][
                    'mu_sys_iterated']
                data_found = True
                break
            except Exception:
                if autofill and autofill_aux:
                    _viia_bilinearize_po_curve(project, [combination], True, False)
                    autofill_aux = False
                    continue
                else:
                    project.write_log(
                        f"ERROR: required input data for '_compute_damping()'"
                        f" could not be found for subsystem '{subsystem}' in load-case '{load_case}'.")
                    break
        if not data_found:
            continue
        # Soil radiation damping: not yet supported, always 0.0. Previously an unrecognized input left
        # 'damping_soil' unassigned, causing a NameError at the total below (bug fix).
        if soil_damping is True:
            project.write_log(
                "WARNING: No module for soil damping is yet included. Input is ignored.")
            damping_soil = 0.0
        elif soil_damping is False:
            damping_soil = 0.0
        else:
            project.write_log(
                "WARNING: Input for 'soil_damping' not recognized. It's effects are not included.")
            damping_soil = 0.0
        # Inherent (viscous) damping
        damping_inh = 0.05
        # Determine hysteretic damping
        if failure_mech.lower() == 'brittle':
            damping_hys = 0.0
        elif failure_mech.lower() == 'ductile':
            damping_hys = max(0.15, 0.42 * (
                1 - 0.9 / np.sqrt(system_ductility_factor) - 0.1 * np.sqrt(system_ductility_factor)))
        else:
            damping_hys = 0.0
            project.write_log(
                "WARNING: failure mechanism type not recognized. Hysteretic damping is excluded")
        damping_total = damping_soil + damping_inh + damping_hys
        # Spectral reduction factor, bounded to [0.55, 1.0]
        spectr_red_fac = min(1.0, max(0.55, np.sqrt(7 / (2 + damping_total * 100))))
        project.project_specific['PO_data'][subsystem][load_case]['spectr_red_factor'] = spectr_red_fac
    return
def viia_pushover_nodes_reactionforces_m_eff(pushover_tbfile_dict, rforce_nodes):
    """
    This function will look for the reaction force results from the TB file with specified node numbers.
    It will return a list of reaction forces of those nodes. This function is specifically used for
    calculating effective masses.

    Function use: ``DIANA``

    Input:
        - pushover_tbfile_dict: parsed tabulated (.tb) file as a dictionary, containing the reaction forces
          of the specified nodes under the 'FORCE REACTI TRANSL' key.
        - rforce_nodes: the required node numbers for evaluation (duplicates are ignored).

    Output:
        - Returns a list with the vertical reaction force (FBZ) of the first load-step for each unique node;
          empty when the 'FORCE REACTI TRANSL' table is absent.
    """
    lines = pushover_tbfile_dict
    # De-duplicate while preserving the input order; the previous 'set()' made the output order
    # nondeterministic across runs
    unique_nodes = list(dict.fromkeys(rforce_nodes))
    rforce_z = []
    # Direct lookup instead of scanning every key of the result dictionary per node
    reaction_table = lines.get('FORCE REACTI TRANSL')
    if reaction_table is not None:
        for node in unique_nodes:
            # TODO here assume the first step is the self_weight
            rforce_z.append(reaction_table['AxesGLOBAL'][node]['FBZ'][0])
    return rforce_z
def _viia_get_pushover_result_data_from_tb(
        project: 'ViiaProject', analysis: str = 'A6', subsystems: Optional[List[str]] = None,
        load_cases: Optional[List[str]] = None, autofill: Optional[bool] = False):
    """
    This function will collect the displacement and the base shear data for the assessment for each load case
    and direction. For each subsystem, the .tb files with displacements and base shear are read. The
    displacement result is put into the result dictionary as an array with the size of the number of load
    steps, while the base shear is calculated by summing up the results of the relevant nodes within the
    subsystem. This function is an antecedent to 'viia_get_pushover_result_dict_const()'.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - analysis: the analysis number, e.g. 'A6'.
        - subsystems: list with names of subsystems to process; defaults to all subsystems in the PO-data.
        - load_cases: list with names of load-cases to process; defaults to all load-cases in the PO-data.
        - autofill: whether or not to call precedential functions when preparatory data is missing.

    Output:
        - None: data is added to project.project_specific['PO_data']
    """
    if not subsystems:
        subsystems = project.project_specific['PO_data']['subsystems']
    if not load_cases:
        load_cases = project.project_specific['PO_data']['load_cases']
    autofill_aux = autofill
    # Verify the PO-data dictionary is available; optionally build it once via autofill and retry
    while True:
        try:
            if None in [project.project_specific['PO_data']]:
                # PO-data explicitly set to None: nothing to collect
                return
            break
        except:
            if autofill and autofill_aux:
                _viia_get_pushover_result_dict_const(project)
                autofill_aux = False
                continue
            else:
                project.write_log(
                    f"ERROR: required preperations for 'viia_get_pushover_result_dict_const()'"
                    f" could not be found.")
                return
    # Get value for each load case and direction
    for load_case in load_cases:
        filenames = {
            'Displacements': f"{project.name}_v{project.version}_OUTPUT_NLPO_DISPLA_TB.tb",
            'Reaction forces': f"{project.name}_v{project.version}_OUTPUT_NLPO_BASE_SHEAR_TB.tb"}
        folder = Path(
            f"{project.workfolder_location}\\{project.diana_settings.analysis_settings[analysis]['folder_name']}\\{load_case}")
        try:
            # Resolve the most recent result folder for this load-case and store it per subsystem
            project.current_analysis_folder = viia_get_latest_result_folder(
                project=project, version=project.version, folder=folder, files=filenames.values())
            for subsystem in project.project_specific['PO_data']['subsystems']:
                project.project_specific['PO_data'][subsystem][load_case]['path'] = \
                    Path(project.current_analysis_folder)
            for key, value in filenames.items():
                filenames[key] = f"{project.current_analysis_folder}\\{value}"
        except:
            project.write_log(
                f"ERROR: directory {folder.name} not found for the specified load-case", True)
            continue
        # Identify the direction from the load-case name (e.g. 'X_pos_uni' -> 'x')
        if load_case[0].upper() == 'X':
            direction = 'x'
        else:
            direction = 'y'
        # Read .tb file
        try:
            disp, reactionforce, node_label_displ, node_label_react = \
                _convert_tb_to_array(project, filenames, direction)
        except Exception as e:
            # NOTE(review): this aborts the whole collection on the first unreadable load-case instead of
            # continuing with the next one — confirm this is intended
            project.write_log(
                'Error :' + f'{e}', True)
            return
        # Construct a dictionary for displacement collection, keyed on the numeric part of labels like
        # 'Nodnr3871'. NOTE(review): .strip('') is a no-op; presumably .strip() was intended.
        find_disp_dict = dict(zip([int(i[5:].strip('')) for i in node_label_displ], disp))
        for subsystem in subsystems:
            for storey in project.project_specific['PO_data'][subsystem]['storeys']:
                try:
                    project.project_specific['PO_data'][subsystem][load_case][storey]['displacement'] = \
                        find_disp_dict[
                            project.project_specific['PO_data'][subsystem][load_case][storey]['disp_node_nr']]
                except KeyError:
                    project.write_log(
                        'WARNING: Node number {}'.format(
                            project.project_specific['PO_data'][subsystem][load_case][storey][
                                'disp_node_nr']) +
                        f' is not output in .tb file. Please check system: {subsystem}', True)
            if project.viia_settings.analysis_subtype == 'SL':
                # TODO: pile case x & y
                # Sum up the base shear
                project.project_specific['PO_data'][subsystem][load_case]['base_shear'] = \
                    abs(np.sum(reactionforce, 0))
            elif project.viia_settings.analysis_subtype == 'ML':
                # Construct the same format with node_label_react
                # foundation_nodes: ['Nodnr3871', ..., 'Nodnr3872']
                foundation_nodes = [
                    f'Nodnr{nr}' for nr in
                    project.project_specific['PO_data'][subsystem][load_case]['base_shear_node_nr']]
                # Find nodes index in .tb matrix
                foundation_idx = [list(node_label_react).index(foundation_node)
                                  for foundation_node in foundation_nodes
                                  if foundation_node in list(node_label_react)]
                # Select the same index for row and add up vertically (row: node number for subsystem,
                # col: load step). The index of the reactionforce and node_label_react are the same
                project.project_specific['PO_data'][subsystem][load_case]['base_shear'] = abs(
                    np.sum(reactionforce[foundation_idx, :], axis=0))
    return
def _convert_tb_to_array(project: 'ViiaProject', filenames: dict, direction: str):
    """
    Function to read tabulated result files from pushover analyses and store displacements and reaction forces
    in arrays.

    Input:
        - project (obj): ViiaProject object containing collections and of fem objects and project variables.
        - filenames (dict): Dictionary with keys 'Displacements' and 'Reaction forces', storing the path of the
          associated tb-files as strings.
        - direction (str): 'x' or 'y', corresponding with the pushover direction.

    Output:
        - An array storing all displacements for each step for each node number considered.
        - An array storing all reaction/nodal forces for each step for each node number considered.
        - All node labels of nodes associated with displacements.
        - All node labels of nodes associated with reaction/nodal forces.
    """
    if direction not in ('x', 'y'):
        # Previously an unknown direction fell through to an UnboundLocalError; fail explicitly instead
        # (the caller handles any Exception raised here).
        raise ValueError(f"ERROR: Unknown pushover direction '{direction}', expected 'x' or 'y'.")

    # Obtain displacement and reaction force data
    # NOTE(review): fem_read_tbfile is not among the imports visible at the top of this file — confirm it is
    # imported elsewhere (e.g. from rhdhv_fem.fem_tools).
    result_tabular_displ = fem_read_tbfile(filenames['Displacements'])
    result_tabular_react = fem_read_tbfile(filenames['Reaction forces'])

    # Evaluation nodes: two known .tb layouts are supported (total vs. relative displacements)
    try:
        node_label_displ = list(result_tabular_displ['DISPLA TOTAL TRANSL']['AxesGLOBAL'].keys())
    except TypeError:
        try:
            node_label_displ = list(result_tabular_displ['DISPLA TOTAL TRANSL RELATI']['AxesGLOBAL'].keys())
        except Exception:
            project.write_log(
                f"ERROR: No known data format for displacements is recognized in {filenames['Displacements']}, "
                f"please check your data and add new format options if necessary.")
            return
    # Base shear nodes: two known .tb layouts are supported (reaction vs. nodal forces)
    try:
        node_label_react = list(result_tabular_react['FORCE REACTI TRANSL']['AxesGLOBAL'].keys())
    except TypeError:
        try:
            node_label_react = list(result_tabular_react['NODFOR TOTAL TRANSL']['AxesGLOBAL'].keys())
        except Exception:
            project.write_log(
                f"ERROR: No known data format for reaction forces is recognized in {filenames['Reaction forces']}, "
                f"please check your data and add new format options if necessary.")
            return

    # The result-component keys only differ in their trailing axis letter (e.g. 'TDtX' vs 'TDtY'),
    # so the previously duplicated x/y branches collapse into one
    axis = direction.upper()
    try:
        disp = [
            result_tabular_displ['DISPLA TOTAL TRANSL']['AxesGLOBAL'][node][f'TDt{axis}']
            for node in node_label_displ]
    except TypeError:
        disp = [
            result_tabular_displ['DISPLA TOTAL TRANSL RELATI']['AxesGLOBAL'][node][f'TrDt{axis}']
            for node in node_label_displ]
    try:
        reactionforce = [
            result_tabular_react['FORCE REACTI TRANSL']['AxesGLOBAL'][node][f'FB{axis}']
            for node in node_label_react]
    except TypeError:
        reactionforce = [
            result_tabular_react['NODFOR TOTAL TRANSL']['AxesGLOBAL'][node][f'FNT{axis}']
            for node in node_label_react]

    # Check if the per-node series lengths are consistent and recover consistency if necessary
    disp_lengths = [len(series) for series in disp]
    react_lengths = [len(series) for series in reactionforce]
    min_disp, max_disp = min(disp_lengths), max(disp_lengths)
    min_react, max_react = min(react_lengths), max(react_lengths)
    if max_disp != min_disp or max_react != min_react or min_disp != min_react:
        # The original implicit string concatenation was missing a space ("datais truncated")
        project.write_log(
            "WARNING: the sizes of the data in the TB files are not consistent, data "
            "is truncated in order to restore consistency")
        min_all = min(min_disp, min_react)
        disp = [series[:min_all] for series in disp]
        reactionforce = [series[:min_all] for series in reactionforce]

    return np.array(disp), np.array(reactionforce), node_label_displ, node_label_react