diff --git a/failure/failureData.xlsx b/failure/failureData.xlsx
new file mode 100644
index 00000000..8d71b22d
Binary files /dev/null and b/failure/failureData.xlsx differ
diff --git a/failure/failureProbabilities.py b/failure/failureProbabilities.py
new file mode 100644
index 00000000..7322e725
--- /dev/null
+++ b/failure/failureProbabilities.py
@@ -0,0 +1,741 @@
+import numpy as np
+import pandas as pd
+import networkx as nx
+import matplotlib.pyplot as plt
+import scipy.stats
+import copy
+import math
+import random
+
+from failure.graphBuilder import *
+from failure.searchMethods import *
+from failure.twoTurbineCaseStudy import *
+
+''' failureProbabilities -----------------------------------------------------------------------------------------------------------
+
+ ****************************** Last Updated: 23 April 2024 ******************************
+
+ Methods:
+ 1) transition_matrix: inputs adjacency matrix, probabilities --> outputs conditional probabilities, scaled probabilities
+
+ 2) conditional_probabilities_update: inputs array of start nodes, list of probabilities --> outputs conditional probabilities
+
+ 3) single_simulation: inputs array of start nodes, adjacency matrix, list of node names, update boolean --> outputs graph generated
+
+ 4) single_backward_simulation: inputs array of start nodes, adjacency matrix, list of node names, update boolean --> outputs graph generated
+
+ 5) monte_carlo_sim: inputs number of iterations, plotting boolean, starting nodes, adjacency matrix, list of node names, random seed
+ boolean, midpoint boolean --> outputs average probabilities, similarity between average and conditional probabilities
+
+ 6) single_conditional_prob: inputs first probability, second probability, midpoint boolean --> outputs conditional probability
+
+ 7) mult_cond_prob: inputs parent nodes, current node, probabilities, midpoint boolean --> outputs conditional probability
+
+ 8) bayesian_table: inputs adjacency matrix, current node, midpoint boolean, node names, alternate probabilities boolean,
+ alternate probabilities, multiple turbines boolean --> outputs parents list, probability distribution table
+
+ 9) write_bayesian_probabilities: inputs adjacency matrix, node names, probabilities, multiple turbines boolean, midpoint boolean,
+ number of iterations --> writes probability distribution tables to an Excel file (nothing returned)
+
+10) probability_over_time: inputs failure rate, time --> outputs probability of a failure at time t
+
+11) bayesian_inference: inputs adjacency matrix, node names, indicator nodes, evidence nodes, hypothesis nodes, probabilities, hypothesis
+boolean, printing boolean, multiple turbine boolean, parent or child indicator, twoTurbine boolean --> outputs inference probabilities
+
+12) backward_bayesian_inference: inputs adjacency matrix, list of node names, array of nodes to start with, array of evidence nodes,
+ array of hypothesis nodes, probabilities, boolean for indexing into the right values of the array, boolean for multiple turbines,
+ parent or child indicator, boolean for twoTurbine method --> outputs two arrays of inference probabilities (one normalized and one not)
+
+------------------------------------------------------------------------------------------------------------------------------------
+------------------------------------------------------------------------------------------------------------------------------------
+
+
+ transition_matrix Documentation
+ ----------------------
+ This method inputs an adjacency matrix and a vector of probabilities (corresponding to the probability that each
+ failure will happen). Since the data we have does not tell us the probability that any two events will happen together, we
+ find the lower and upper bounds of the conditional probability for every pair of related events (related as indicated
+ by the adjacency matrix). We then take the midpoint of these bounds as the probability that the second failure will
+ occur given that the first has already occurred. This method returns the array of these conditional probabilities and
+ a scaled array of these probabilities (scaled so that each column sums to 1).'''
+
+def transition_matrix(arr, probabilities, mid_point = True):
+    arr = make_binary(arr) # Binarize the array showing which failures lead to other failures
+    nodes = diagonal_nodes(arr) # Matrix with node numbers on the diagonal
+
+    bounds = [] # Initialize list of upper and lower bounds
+    transitions = np.zeros(arr.shape) # Initialize matrix of conditional probabilities
+
+    for node in range(arr.shape[0]): # For each node...
+        children_bool = arr[node] @ nodes # Find the children of the node
+        children = children_bool[np.nonzero(children_bool)]
+
+        parent_probability = probabilities[node][0] # Find the probability of the node occurring
+        lower_bound = [] # Initialize lower bounds of all children of the node
+        upper_bound = [] # Initialize upper bounds of all children of the node
+
+        for child in children: # For each of the node's children...
+            child_probability = probabilities[child - 1][0] # Find the probability of the child occurring
+
+            # The lower bound is the probability of the child occurring (i.e. child and parent are completely independent events)
+            lb = child_probability
+            # The upper bound is the overlap of the child and parent probabilities (i.e. child and parent are completely dependent events)
+            ub = min(parent_probability, child_probability)/parent_probability
+
+            if mid_point:
+                tm_val = (lb + ub) / 2 # Calculate the midpoint of the two bounds
+                # print("Midpoint = True") --> for debugging
+            else:
+                rand_val = np.random.rand()
+                tm_val = rand_val * (ub - lb) + lb # Draw a random value between the two bounds
+                # print(rand_val, tm_val, tm_val == ((lb + ub) / 2), lb, ub) --> for debugging
+                # print("Midpoint = False") --> for debugging
+
+            transitions[node][child - 1] = tm_val
+            lower_bound.append(lb) # Append the lower bound to the list
+            upper_bound.append(ub) # Append the upper bound to the list
+
+        lower_bound = np.array(lower_bound) # Convert lower and upper bounds to numpy arrays
+        upper_bound = np.array(upper_bound)
+
+        midpoint = (upper_bound + lower_bound) / 2 # Calculate the midpoints for all the children of the parent node
+
+        bounds.append([lower_bound, upper_bound, midpoint]) # Append the lower, upper, and midpoint values to the list
+
+    scaled_transitions = transitions / np.sum(transitions, axis = 0) # Scale the conditional probabilities so each column sums to 1
+    scaled_transitions[np.isnan(scaled_transitions)] = 0 # Replace any NaN values with 0s
+    return transitions, scaled_transitions # Return conditional probabilities and scaled conditional probabilities
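+
+# Illustrative sketch (not part of the original module): for a hypothetical 3-node
+# chain A -> B -> C with marginal probabilities 0.2, 0.1, and 0.05, each edge gets
+# the midpoint of lb = P(child) and ub = min(P(parent), P(child)) / P(parent):
+#
+#   toy_arr = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])
+#   toy_probs = np.reshape(np.array([0.2, 0.1, 0.05]), (3, 1))
+#   transitions, scaled = transition_matrix(toy_arr, toy_probs, mid_point=True)
+#   # transitions[0][1] == (0.1 + min(0.2, 0.1) / 0.2) / 2 == 0.3
+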
+''' conditional_probabilities_update Documentation
+ --------------------------------------------------
+ This method inputs an array of starting nodes and a list of probabilities that each node will
+ occur. We then compute the conditional probabilities of all the nodes given the starting nodes. This method returns
+ the list of calculated conditional probabilities.'''
+
+def conditional_probabilities_update(start_arr, probabilities):
+    # Create a vector of the parent probability for each start node (start values are 1-based node numbers)
+    for start in start_arr:
+        parent_probability = np.reshape(np.repeat(probabilities[start-1][0], probabilities.shape[0]), probabilities.shape)
+
+        # Calculate the conditional probabilities (using the midpoint method described in the transition_matrix() method)
+        conditional_probabilities = (probabilities + np.reshape(np.min(np.hstack((probabilities, parent_probability)), axis = 1), probabilities.shape)/parent_probability[0])/2
+        probabilities = conditional_probabilities
+
+    return conditional_probabilities # Return the calculated probabilities
+
+''' bayesian_probabilities Documentation
+ --------------------------------------------------
+ This method is an unfinished stub. It is intended to input a list of parent nodes, a child node, and a list of
+ probabilities, and to compute the conditional probability of the child given its parents. It currently returns nothing.'''
+
+def bayesian_probabilities(parents, child, probabilities):
+    for i in range(len(parents)):
+        rand_val = np.random.rand() # Placeholder draw for a future bounds-sampling calculation
+    return None # Not yet implemented
+
+
+
+''' single_simulation Documentation
+ ---------------------------------------------
+ This method inputs an array of starting nodes, an adjacency matrix, a list of node names, a boolean for updating
+ conditional probabilities based on already visited nodes, a boolean for plotting, a boolean for seeding the random
+ number generator, and a boolean for using the midpoint calculation. We then simulate the failure propagation and can
+ graph the propagation via Networkx. This method returns the graph generated, a matrix marking which edges fired, and
+ an indicator vector of which nodes failed.'''
+
+def single_simulation(start_arr, arr, nodeNames, update = False, plot = False, rand_seed = True, mid_point = True):
+    monte_carlo_array = np.zeros(arr.shape)
+    probs = np.zeros((arr.shape[0], 1))
+
+    # Use the probabilities from the COREWIND failure rates
+    probabilities = np.array([0.0195, 0.0195, 0.013625, 0.0055, 0.0175, 0.2075, 0.001, 0.001, 0.001, 0.093185, 0.001, 0.001,
+                              0.027310938, 0.033968125, 0.033968125, 0.01375, 0.01375, 0.01375, 0.01375, 0.01375, 0.01375,
+                              0.0205, 0.0205, 0.02, 0.01, 0.01, 0.233, 0.288, 0.543374, 0.1285, 0.01, 0.01, 0.01, 0.015, 0.0155,
+                              0.015, 0.0155, 0.015, 0.0155, 0.015, 0.33, 0.025, 0.025, 0.025, 0.025, 0.025, 0.105]) #0.01375,
+    probabilities = np.reshape(probabilities, (arr.shape[0], 1)) # Reshape these probabilities into a vector
+
+    G = nx.DiGraph() # Initialize a graph and lists of modes and effects for plotting nodes in different colors
+    effects = []
+    modes = []
+
+    if update: # If updating is desired, update the probabilities given the indicated starting nodes
+        probabilities = conditional_probabilities_update(start_arr, probabilities)
+    transitions, tm = transition_matrix(arr, probabilities, mid_point) # Compute the probabilities for all linked nodes
+
+    if rand_seed: np.random.seed(16) # Seed numpy's generator (np.random.rand is used below) for reproducible results
+
+    adj = make_binary(arr).astype(int) # Make a binary adjacency matrix to determine the children of each node
+    nodes = diagonal_nodes(arr) # Diagonal array of node indices + 1
+
+    nodeList = np.reshape(np.repeat(False, arr.shape[0]), (arr.shape[0], 1)) # Create a list to determine which nodes have been visited
+    gens = {}
+    queue = []
+
+    for start in start_arr:
+        G.add_node(nodeNames[start-1]) # Label start nodes the same way as later nodes so edges attach correctly
+        nodeList[start-1][0] = True # Mark the start node as visited
+        queue.append([start, 0])
+        gens.update({nodeNames[start-1]: {"layer": 0}})
+        # Determine if the start node is an effect or a mode, and add it to the correct array
+        if start < 27: effects.append(nodeNames[start - 1]) # Add the start node to either the effects or modes list
+        else: modes.append(nodeNames[start - 1])
+
+    while len(queue) > 0: # For each node in the queue...
+        current = queue[0] # Get the node from the front of the queue
+        children_bool = adj[current[0]-1] @ nodes # vector of zeros and child names (numerical names)
+        children = children_bool[np.nonzero(children_bool)] # list of just the child names (numerical names)
+
+        for child in children: # For each child of the current node...
+            if nodeList[child - 1] == False or gens[nodeNames[child - 1]]['layer'] > current[1]: # If the node has not been visited (or sits in a later layer)...
+                if np.random.rand() < transitions[current[0] - 1][child - 1]: # If the random value is less than the probability...
+                    monte_carlo_array[current[0] - 1][child - 1] = 1
+                    probs[child - 1] = 1
+                    G.add_node(nodeNames[child-1]) # Add the child to the graph
+                    if update: # If updating is desired, update the probabilities with the addition of the child node
+                        probabilities = conditional_probabilities_update([current[0]], probabilities)
+                        transitions, tm = transition_matrix(arr, probabilities, mid_point)
+
+                    # Determine if the child is an effect or mode and add it to the correct array
+                    if child < 27: effects.append(nodeNames[child - 1])
+                    else: modes.append(nodeNames[child - 1])
+
+                    # Add an edge between the current node and the child in the tree we are building
+                    G.add_edge(nodeNames[current[0]-1], nodeNames[child-1])
+
+                    queue.append([child, current[1]+1]) # Append the child to the queue
+                    nodeList[child - 1] = True # Change the status of the child to say we have visited it
+                    gens.update({nodeNames[child-1]: {"layer": current[1]+1}}) # Add the child to the layer dictionary
+        queue = queue[1:] # Remove the current node from the queue
+
+    nx.set_node_attributes(G, gens) # Set the layers of the graph
+
+    # Plot the graph
+    if plot:
+        pos = nx.multipartite_layout(G, subset_key='layer')
+        nx.draw_networkx_nodes(G, pos, nodelist=effects, node_color="#98c5ed", node_size=2700, edgecolors="#799dbd", node_shape="s")
+        nx.draw_networkx_nodes(G, pos, nodelist=modes, node_color="#fabc98", node_size=2700, edgecolors="#c89679", node_shape="s")
+        nx.draw_networkx_labels(G, pos, font_size=10, verticalalignment='center_baseline')
+        nx.draw_networkx_edges(G, pos, arrowsize=60)
+        plt.box(False)
+        plt.show()
+
+    # draw_bfs_multipartite(arr, nodeNames, start_arr, "child") # Plot the graph when all probabilities are one, for comparison
+    return G, monte_carlo_array, probs # Return the graph, the fired-edge matrix, and the visited-node indicator
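+
+# Hedged usage sketch: the file and sheet names below mirror the commented run
+# block at the bottom of this module and are assumptions about the data layout,
+# not guarantees; node 44 is an arbitrary example start node.
+#
+#   arr, nodeNames = excel2Matrix("failureData.xlsx", "bigMatrix")
+#   G, fired_edges, failed_nodes = single_simulation([44], arr, nodeNames, plot=True)
+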
+''' single_backward_simulation Documentation
+ ---------------------------------------------
+ This method inputs an array of starting nodes, an adjacency matrix, a list of node names, and a boolean for updating
+ conditional probabilities based on already visited nodes. We then compute the backward failure propagation (toward
+ parents) and can graph the propagation via Networkx. This method returns the graph generated, its adjacency matrix,
+ and the probabilities used.'''
+
+def single_backward_simulation(start_arr, arr, nodeNames, update = False):
+    # Use the probabilities from the COREWIND failure rates
+    probabilities = np.array([0.0195, 0.0195, 0.013625, 0.0055, 0.0175, 0.2075, 0.001, 0.001, 0.001, 0.093185, 0.001, 0.001,
+                              0.027310938, 0.033968125, 0.033968125, 0.01375, 0.01375, 0.01375, 0.01375, 0.01375, 0.01375,
+                              0.0205, 0.0205, 0.02, 0.01, 0.01, 0.233, 0.288, 0.543374, 0.1285, 0.01, 0.01, 0.01, 0.015, 0.0155,
+                              0.015, 0.0155, 0.015, 0.0155, 0.015, 0.33, 0.025, 0.025, 0.025, 0.025, 0.025, 0.105]) #0.01375,
+    probabilities = np.reshape(probabilities, (arr.shape[0], 1)) # Reshape these probabilities into a vector
+
+    G = nx.DiGraph() # Initialize a graph and lists of modes and effects for plotting nodes in different colors
+    effects = []
+    modes = []
+    queue = []
+    gens = {}
+
+    if update: # If updating is desired, update the probabilities given the indicated starting nodes
+        probs = conditional_probabilities_update(start_arr, probabilities)
+    else: probs = probabilities
+    transitions, tm = transition_matrix(arr.T, probs) # Compute the probabilities for all linked nodes (transposed for backward propagation)
+
+    # np.random.seed(16) # Seed for reproducible results --> feel free to comment out
+
+    adj = make_binary(arr.T).astype(int) # Transpose so that each "child" below is a parent in the original graph, matching the transposed transitions
+    nodes = diagonal_nodes(adj) # Create a matrix with node names to determine the children of each node
+    nodeList = np.reshape(np.repeat(False, arr.shape[0]), (arr.shape[0], 1)) # Create a list to determine which nodes have been visited
+
+    for start in start_arr:
+        G.add_node(nodeNames[start-1]) # Label start nodes the same way as later nodes so edges attach correctly
+        nodeList[start-1][0] = True # Mark the start node as visited
+        queue.append([start, 0])
+        gens.update({nodeNames[start-1]: {"layer": 0}})
+        # Determine if the start node is an effect or a mode, and add it to the correct array
+        if start < 27: effects.append(nodeNames[start - 1])
+        else: modes.append(nodeNames[start - 1])
+
+    while len(queue) > 0: # For each node in the queue...
+        current = queue[0] # Get the node from the front of the queue
+        children_bool = adj[current[0]-1] @ nodes # vector of zeros and child names (numerical names)
+        children = children_bool[np.nonzero(children_bool)] # list of just the child names (numerical names)
+
+        for child in children: # For each child of the current node...
+            if nodeList[child - 1] == False or gens[nodeNames[child - 1]]['layer'] < current[1]: # If the node has not been visited (or sits in an earlier layer)...
+                if np.random.rand() < transitions[current[0] - 1][child - 1]: # If the random value is less than the probability...
+                    G.add_node(nodeNames[child-1]) # Add the child to the graph
+                    if update: # If updating is desired, update the probabilities with the addition of the child node
+                        probs = conditional_probabilities_update([current[0]], probs)
+                        transitions, tm = transition_matrix(arr.T, probs)
+
+                    # Determine if the child is an effect or mode and add it to the correct array
+                    if child < 27: effects.append(nodeNames[child - 1])
+                    else: modes.append(nodeNames[child - 1])
+
+                    # Add an edge from the child to the current node in the tree we are building (edges point forward)
+                    G.add_edge(nodeNames[child-1], nodeNames[current[0]-1])
+
+                    queue.append([child, current[1]-1]) # Append the child to the queue
+                    nodeList[child - 1] = True # Change the status of the child to say we have visited it
+                    gens.update({nodeNames[child-1]: {"layer": current[1]-1}}) # Add the child to the layer dictionary
+        queue = queue[1:] # Remove the current node from the queue
+
+    nx.set_node_attributes(G, gens) # Set the layers of the graph
+
+    # Plot the graph
+    # pos = nx.multipartite_layout(G, subset_key='layer')
+    # nx.draw_networkx_nodes(G, pos, nodelist=effects, node_color="#98c5ed", node_size=2700, edgecolors="#799dbd", node_shape="s")
+    # nx.draw_networkx_nodes(G, pos, nodelist=modes, node_color="#fabc98", node_size=2700, edgecolors="#c89679", node_shape="s")
+    # nx.draw_networkx_labels(G, pos, font_size=10, verticalalignment='center_baseline')
+    # nx.draw_networkx_edges(G, pos, arrowsize=60)
+    # plt.box(False)
+    # plt.show()
+    return G, nx.to_numpy_array(G), probs # Return the graph, its adjacency matrix, and the probabilities used
+
+
+
+
+''' monte_carlo_sim Documentation
+ ---------------------------------------------
+ This method inputs the number of iterations, a boolean for plotting the average graph, an array of the nodes to start with,
+ an adjacency matrix, a list of node names, a boolean for a random seed, and a boolean for using the midpoint calculation. We
+ generate a failure-propagation graph for the given number of iterations and find the average graph and the probability that
+ each node appears in the graph (the number of times the node shows up divided by the number of iterations). Lastly, we
+ calculate the similarity between the node probabilities we just computed and the estimated probabilities calculated via
+ conditional probabilities. We return the average probabilities and the cosine similarity between the two lists of probabilities.'''
+
+def monte_carlo_sim(num_iterations, plot, start_arr, adjacency_matrix, nodeNames, rand_seed, mid_point):
+    # Initialize the adjacency matrix to track which edges appear in each graph (which we will then average)
+    adj_matrices = np.zeros(adjacency_matrix.shape)
+
+    # Initialize an array to track average probabilities for each node
+    probs = np.zeros((adjacency_matrix.shape[0], 1))
+
+    # List of probabilities for each failure happening
+    probabilities = np.array([0.0195, 0.0195, 0.013625, 0.0055, 0.0175, 0.2075, 0.001, 0.001, 0.001, 0.093185, 0.001, 0.001,
+                              0.027310938, 0.033968125, 0.033968125, 0.01375, 0.01375, 0.01375, 0.01375, 0.01375, 0.01375,
+                              0.0205, 0.0205, 0.02, 0.01, 0.01, 0.233, 0.288, 0.543374, 0.1285, 0.01, 0.01, 0.01, 0.015, 0.0155,
+                              0.015, 0.0155, 0.015, 0.0155, 0.015, 0.33, 0.025, 0.025, 0.025, 0.025, 0.025, 0.105]) #0.01375,
+    probabilities = np.reshape(probabilities, (adjacency_matrix.shape[0], 1))
+
+    # Run each simulation
+    for i in range(num_iterations):
+        arr = copy.deepcopy(adjacency_matrix)
+        G, adj_mat, prob = single_simulation(start_arr, arr, nodeNames, rand_seed = rand_seed, mid_point = mid_point)
+        adj_matrices += adj_mat.astype(float)
+        probs += prob
+        # print(i+1) # Debugging --> feel free to uncomment
+
+    # Calculate the average graph and average probabilities
+    adj_matrices = adj_matrices/num_iterations
+    probs = probs/num_iterations
+    K, abc = matrix2Graph(adj_matrices, nodeNames, True)
+
+    # Calculate the similarity between the average and conditional probabilities
+    v1 = conditional_probabilities_update(start_arr, probabilities)
+    v2 = probs
+
+    # Plot the average graph
+    if plot:
+        draw_bfs_multipartite(adj_matrices, nodeNames, start_arr, "child")
+    return v2, cosine_similarity(v1, v2) # Return the average probabilities and the similarity of the average and conditional probabilities
+
+
+
+
+''' single_conditional_prob Documentation
+ ---------------------------------------------
+ This method inputs the probability of the first failure, the probability of the second failure, and a boolean for using the
+ midpoint calculation. We use these probabilities to estimate the conditional probability between the two failures and return it.'''
+
+def single_conditional_prob(pa, pb, mid_point = True):
+    lb = pb * pa # Lower bound for the overlap P(A and B) (independent events)
+    ub = min(pa, pb) # Upper bound for the overlap P(A and B) (fully dependent events)
+    if mid_point:
+        overlap = (lb + ub) / 2 # Find the midpoint between the lower and upper bounds
+    else:
+        rand_val = np.random.rand()
+        overlap = rand_val * (ub - lb) + lb # Find a random value between the lower and upper bounds
+    return overlap/pa # Return the conditional probability P(B|A)
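+
+# Worked example (illustrative numbers): with P(A) = 0.2 and P(B) = 0.1, the
+# overlap P(A and B) is bounded by 0.02 (independence) and 0.1 (full dependence).
+# The midpoint is 0.06, so single_conditional_prob(0.2, 0.1) returns
+# P(B|A) = 0.06 / 0.2 = 0.3.
+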
+''' mult_cond_prob Documentation
+ ---------------------------------------------
+ This method inputs an array of parent nodes, the current node, an array of probabilities, and a boolean for using the midpoint
+ calculation. We calculate the conditional probability when the node has multiple parents (i.e. the probability of the current
+ event given the parent events). This method outputs the conditional probability.'''
+
+def mult_cond_prob(parents, current, probabilities, midpoint = True):
+    if len(parents) < 1:
+        return probabilities[current - 1] # If there are no parents, return the probability of the current node
+
+    # Create a list of parent node probabilities, with the probability of the current node appended to the end
+    parents_sub = [int(parent - 1) for parent in parents]
+    parents_sub.append((current - 1))
+    parents_sub = np.array(parents_sub)
+    parent_probs = probabilities[parents_sub]
+    # Initialize the denominator
+    denominator = 1
+
+    # Calculate the single conditional probability for pairs of events until there are no events left
+    while len(parent_probs) > 1:
+        if (not isinstance(parent_probs[1][0], float)) and len(parent_probs[1][0]) > 1:
+            parent_probs[1][0] = parent_probs[1][0][1]
+        lb = parent_probs[0][0] * parent_probs[1][0] # Lower bound for the conditional probability
+        ub = min(parent_probs[0][0], parent_probs[1][0]) # Upper bound for the conditional probability
+        if midpoint:
+            val = (lb + ub) / 2 # Find the midpoint between the lower and upper bounds
+        else:
+            rand_val = np.random.rand()
+            val = rand_val * (ub - lb) + lb
+        parent_probs = np.vstack(([val], parent_probs[2:]))
+        if len(parent_probs) == 2:
+            denominator = parent_probs[0][0]
+        # print(parent_probs[0][0], denominator, parent_probs[0][0]/denominator)
+    return parent_probs[0][0]/denominator # Return the conditional probability
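+
+# Illustrative sketch: the conditional probability of node 3 given parents 1 and 2,
+# using a made-up probability vector. The chained pairwise midpoint calculation
+# above reduces the parent list one pair at a time.
+#
+#   toy_probs = np.reshape(np.array([0.2, 0.1, 0.05]), (3, 1))
+#   mult_cond_prob([1, 2], 3, toy_probs)  # P(3 | 1, 2) under midpoint bounds
+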
+''' bayesian_table Documentation
+ ---------------------------------------------
+ This method inputs an adjacency matrix, the current node, a boolean for the midpoint calculation, a list of node names, a boolean
+ for alternate probabilities, an array of new probabilities, and a boolean for calculations among multiple turbines. We calculate
+ the probability distribution and format it into a table. This method outputs the list of parents and the probability distribution table.'''
+
+def bayesian_table(adj, current, midpoint, nodeNames, pvs = False, prob_vals=None, mult_turbine = False):
+    nodes = diagonal_nodes(adj) # Diagonal matrix of the nodes' numerical names + 1
+    adj = make_binary(adj, 0.5) # Binarize the adjacency matrix
+
+    parents_bool = nodes @ adj[:, current-1] # vector of zeros and parent names (numerical names)
+    parents = list(parents_bool[np.nonzero(parents_bool)]) # list of just the parent names (numerical names)
+    prob_table = np.zeros((2 ** len(parents), len(parents) + 2)) # Initialize the array for the node's conditional probability distribution
+
+    # *** Code in development for multiple turbine bayesian tables ***
+    arr_nonbinary = adj.copy()
+    num_parents = len(parents)
+
+    '''if mult_turbine:
+        for p in range(num_parents):
+            if arr_nonbinary[parents[p] - 1][current - 1] > 1:
+                print("Unlocked!", parents[p])
+                parents.append(parents[p])'''
+    # *** End of code in development *********************************
+
+    for iteration in range(2 ** len(parents)): # For each combination of parents having either failed or not
+        if pvs: # Determine the probabilities being used
+            probabilities = prob_vals.copy()
+        else:
+            probabilities = np.array([0.0195, 0.0195, 0.013625, 0.0055, 0.0175, 0.2075, 0.001, 0.001, 0.001, 0.093185, 0.001, 0.001,
+                                      0.027310938, 0.033968125, 0.033968125, 0.01375, 0.01375, 0.01375, 0.01375, 0.01375, 0.01375,
+                                      0.0205, 0.0205, 0.02, 0.01, 0.01, 0.233, 0.288, 0.543374, 0.1285, 0.01, 0.01, 0.01, 0.015, 0.0155,
+                                      0.015, 0.0155, 0.015, 0.0155, 0.015, 0.33, 0.025, 0.025, 0.025, 0.025, 0.025, 0.105]) #0.01375,
+        probabilities = np.reshape(probabilities, (len(probabilities), 1))
+        # print(probabilities)
+
+        true_false_array = [] # Initialize to record which parents have failed
+
+        for p in range(len(parents)):
+            probabilities[int(parents[p]-1)] = abs((int(iteration / (2 ** p)) % 2) - probabilities[int(parents[p]-1)]) # Determine the parent probability (given whether the parent has failed or not)
+            true_false_array.append((int(iteration / (2 ** p)) % 2)) # Record whether or not the parent node has failed
+            prob_table[iteration][p] = int(iteration / (2 ** p)) % 2 # Record whether or not the parent node has failed
+
+            # if mult_turbine and nodeNames[int(current - 1)][:3] != nodeNames[int(parents[p] - 1)][:3]: #and p > 0) and parents[p] <= parents[p-1]:
+            #     probabilities[int(parents[p]-1)] *= 0.7 # If there is more than one turbine, then decrease the probability of failure by 30%
+        prob = mult_cond_prob(parents, current, probabilities, midpoint) # Calculate the conditional probability of the node given whether the parents have failed or not
+        # print(true_false_array, prob)
+        # print(probabilities)
+        prob_table[iteration][-2] = prob # Add the calculated probability to the array
+        prob_table[iteration][-1] = 1 - prob # Add the probability of the node not failing to the array
+    # print(prob_table)
+    return parents, prob_table # Return a list of the parents and the probability distribution array
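+
+# Hedged usage sketch: build the conditional probability table for one node. Each
+# row of the returned table lists a True/False combination of the parents, then
+# P(node = True) and P(node = False) for that combination. The file and sheet
+# names are assumptions carried over from the run blocks at the bottom of this module.
+#
+#   adjacency_matrix, nodeNames = excel2Matrix("failureData.xlsx", "bigMatrix")
+#   parents, table = bayesian_table(adjacency_matrix, 16, True, nodeNames)
+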
+''' write_bayesian_probabilities Documentation
+ ----------------------------------------------
+ This method inputs an adjacency matrix, a list of node names, probabilities, a boolean for calculations among multiple turbines,
+ a boolean for the midpoint calculation, and a number of iterations. We calculate the probability distribution for each node in the
+ graph and format it into a table. This method writes the probability distribution tables to an Excel file (nothing returned).'''
+
+def write_bayesian_probabilities(adjacency_matrix, nodeNames, probabilities, mult_turbine, midpoint=True, num_iterations=1, twoTurbines = False, name=""):
+    ps = probabilities.copy()
+    with pd.ExcelWriter("bayesianProbabilities"+name+".xlsx") as writer: # Write to file
+        for current in range(1, adjacency_matrix.shape[0]+1): # Repeat for each node in the graph
+            if any(probabilities != ps): # Guard: the probability vector should never be mutated between nodes
+                print("Unequal probabilities", current-1)
+                break
+            adjacency_matrix2 = adjacency_matrix.copy()
+            parents, our_table = bayesian_table(adjacency_matrix2, current, midpoint, nodeNames, True, probabilities, mult_turbine) # Probability distribution table
+            if twoTurbines:
+                parents, our_table = twoTurbine_bayesian_table(adjacency_matrix, adjacency_matrix, current, nodeNames, nodeNames) # Probability distribution table
+            parents = [int(parent) for parent in parents] # Format the parent list
+
+            # Find the average probability distribution table over the number of iterations
+            for i in range(num_iterations - 1):
+                parents, our_table2 = bayesian_table(adjacency_matrix, current, midpoint, nodeNames = nodeNames, pvs=True, prob_vals=probabilities, mult_turbine=mult_turbine)
+                our_table += our_table2
+            our_table = our_table / num_iterations # Divide the accumulated tables to get the average
+
+            # Update the column titles for the probability distribution table
+            parents = [nodeNames[parent - 1].replace("\n", " ") for parent in parents]
+            parents = np.append(parents, "P(" + nodeNames[current - 1].replace("\n", " ") + " = True)")
+            parents = np.append(parents, "P(" + nodeNames[current - 1].replace("\n", " ") + " = False)")
+            df = pd.DataFrame(our_table, columns = parents)
+
+            df.to_excel(writer, sheet_name=nodeNames[current - 1].replace("\n", " ").replace("/", " or ").replace(":", "")[:31]) # Write to file (Excel sheet names are capped at 31 characters)
+            print(current, nodeNames[current - 1].replace("\n", " ")[:31]) # Print the current node to keep track of which nodes have a probability distribution table
+
+
+
+
+''' probability_over_time Documentation
+ ----------------------------------------------
+ This method inputs a failure rate, lamda (spelled this way to avoid Python's reserved word), and a time, t.
+ We return the probability of a failure occurring by time t.'''
+
+def probability_over_time(lamda, t):
+    return 1 - math.exp((np.log(1-lamda)) * t) # Return the updated probability of failure by time t, i.e. 1 - (1 - lamda)**t
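+
+# Worked example: the method computes 1 - exp(t * ln(1 - lamda)), which is
+# algebraically 1 - (1 - lamda)**t. For a failure rate of 0.01 per time unit,
+# probability_over_time(0.01, 10) = 1 - 0.99**10, roughly 0.0956.
+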
+''' bayesian_inference Documentation
+ ----------------------------------------------
+ This method inputs an adjacency matrix, a list of node names, an array of nodes to start with for generating the graph of the
+ Bayesian network, an array of evidence nodes, an array of hypothesis nodes, probabilities, a boolean for finding the probability
+ of failure of the hypothesis, a boolean for printing information about the calculations, a boolean for multiple turbines, a parent
+ or child calculation indicator (string), and a boolean for using the twoTurbine case study method. We calculate Bayesian inference
+ given the evidence and hypothesis variables. This method outputs an array of inference probabilities.'''
+
+def bayesian_inference(arr, nodeNames, indicator, num_evidence, num_hypothesis, probabilities, tf = True, printing = False, multi= False, poc="parent", twoTurbine = False):
+    # print("E", num_evidence)
+    # print("H", num_hypothesis)
+    a = arr.copy()
+    non = nodeNames
+    prblts = probabilities
+    evidence = num_evidence
+    hypothesis = num_hypothesis
+    if not multi:
+        K, a, g, e, m, non = breadth_first_multi(a, nodeNames, indicator, poc) # Generate the tree for the Bayesian network
+        # draw_bfs_multipartite(arr, nodeNames, indicator, type = "multi-"+poc, multi_turbine = False)
+        prblts = [] # Initialize the array of node probabilities (in order of appearance in the graph)
+        for node in non:
+            node_index = np.where(nodeNames == node)[0][0]
+            prblts.append(probabilities[node_index]) # Add nodes to the array of node probabilities
+        prblts = np.array(prblts)
+
+        evidence = []
+        hypothesis = []
+        for hypothesis_node in num_hypothesis: # Adjust each hypothesis node so that its index in the tree matches its index in the original adjacency matrix
+            if twoTurbine:
+                non = np.array(non)
+                if nodeNames[hypothesis_node - 1] not in non: # If the node is not in the tree, then there is a 0% probability of failure
+                    print("Probability table of", nodeNames[hypothesis_node - 1], "...", np.reshape(np.array([0,1]), (1,2)))
+                    return np.reshape(np.array([0,1]), (1,2)), np.reshape(np.array([0,1]), (1,2))
+            hypothesis.append(np.where(non == nodeNames[hypothesis_node - 1])[0][0])
+
+        for evidence_node in num_evidence: # Adjust each evidence node so that its index in the tree matches its index in the original adjacency matrix
+            if twoTurbine:
+                non = np.array(non)
+                if nodeNames[evidence_node - 1] not in non: # If the node is not in the tree, then this is an error!
+                    print("ERROR - evidence node, " + nodeNames[evidence_node - 1] + ", not in graph!")
+            if evidence_node in num_hypothesis: # If the node is also in the hypothesis, then its probability of failure is 100%
+                print("Probability table of", nodeNames[evidence_node - 1], "...", np.reshape(np.array([1,0]), (1,2)))
+                return np.reshape(np.array([1,0]), (1,2)), np.reshape(np.array([1,0]), (1,2))
+            evidence.append(np.where(non == nodeNames[evidence_node - 1])[0][0])
+
+    probability_table = np.zeros((2, a.shape[0])) # Initialize the table of inference probabilities
+    nodes = diagonal_nodes(a) # Diagonal matrix of node names (numerical + 1)
+    a = make_binary(a, 0.5) # Binarize the adjacency matrix
+    hypothesis_nodes_array = np.zeros((len(hypothesis), 2))
+
+    # Depending on the tree (forward or backward propagation), iterate through the nodes either backwards or forwards
+    if poc == "parent":
+        this_range = reversed(range(a.shape[0]))
+    elif poc == "child":
+        this_range = range(a.shape[0])
+
+    for node in this_range:
+        pts_bool = nodes @ a[:, node] # vector of zeros and parent names (numerical names)
+        pts = pts_bool[np.nonzero(pts_bool)] # list of just the parent names (numerical names)
+
+        if len(pts) < 1: # If there are no parents, the probability is the initial probability
+            probability_table[0][node] = prblts[node]
+            probability_table[1][node] = 1 - prblts[node]
+            continue
+
+        parents, our_table = bayesian_table(a, node+1, True, nodeNames, True, prblts) # Calculate the probability distribution table
+
+        # If only using two turbines (specific to the case study), calculate using the twoTurbine method
+        if twoTurbine:
+            parents, our_table = twoTurbine_bayesian_table(a, arr, node + 1, nodeNames, non) # Calculate the probability distribution table
+        mlt_table = np.ones((our_table.shape[0],2)) # Initialize the table for multiplying across rows of the probability distribution table
+        for i in range(our_table.shape[0]):
+            for j in range(our_table.shape[1] - 2):
+                parent = int(parents[j])
+
+                # Replace the boolean in the probability distribution table with the probability of this event
+                if our_table[i,j] == 0:
+                    # print(a.shape, probability_table.shape)
+                    our_table[i,j] = probability_table[0][parent - 1]
+
+                    # If the node is in the hypothesis or evidence array, do not include it
+                    if probability_table[0][parent - 1] == 0:
+                        # print("indexing_error!!", parent, node)
+                        # print(non[parent], non[node])
+                        break
+                    if (parent-1 in hypothesis and tf): # or (parent in evidence):
+                        our_table[i,j] = 0
+                        # print("in hypothesis and tf, or in evidence", parent)
+                else:
+                    our_table[i,j] = probability_table[1][parent - 1]
+
+                    # If the node is in the hypothesis or evidence array, do not include it
+                    if (parent-1 in hypothesis and not tf) or (parent-1 in evidence):
+                        our_table[i,j] = 0
+                        # print("in hypothesis and not tf, or in evidence", parent)
+
+                mlt_table[i,0] *= our_table[i,j] # Multiply the probabilities across the probability distribution table
+            mlt_table[i,1] = mlt_table[i,0] * our_table[i, -1] # Multiply by the probability of the event not failing given this combination of parent failures
+            mlt_table[i,0] *= our_table[i, -2] # Multiply by the probability of the event failing given this combination of parent failures
+
+        sm_table = np.sum(mlt_table, axis = 0) #/np.sum(mlt_table) # Sum the products of probabilities down the columns
+
+        if node in hypothesis:
+            print("Probability table of", non[node].replace("\n", " "), "...", sm_table)
+            hypothesis_nodes_array[np.where(np.array(hypothesis) == node)[0][0]][0] = sm_table[0]
+            hypothesis_nodes_array[np.where(np.array(hypothesis) == node)[0][0]][1] = sm_table[1]
+            if np.all(hypothesis_nodes_array != 0): # Stop early once every hypothesis node has been filled in
+                return probability_table, hypothesis_nodes_array
+
+        probability_table[0][node] = sm_table[0] # Update the inference probability table with the probabilities just calculated
+        probability_table[1][node] = sm_table[1]
+
+    if printing: # Print the inference probability, along with the indicators, evidence, and hypothesis conditionals
+        print("Indicator:", nodeNames[np.array(indicator)])
+        print("Evidence:", nodeNames[np.array(evidence)])
+        print("Hypothesis:", nodeNames[np.array(hypothesis)])
+        print("Probability of Failure:", probability_table[:, 0][0], probability_table[:, 0][0]/np.sum(probability_table[:, 0]))
+        print("Probability of No Failure:", probability_table[:, 0][1], probability_table[:, 0][1]/np.sum(probability_table[:, 0]))
+    return probability_table, hypothesis_nodes_array # Return the array of inference probabilities
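+
+# Hedged usage sketch, mirroring the commented run block at the end of this file:
+# the probability that node 44 fails given that node 0 has failed. The node
+# numbers are example values, not prescribed inputs.
+#
+#   pt, hyp = bayesian_inference(adjacency_matrix, nodeNames, [0], [0], [44],
+#                                probabilities, tf=True, printing=True)
+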
+''' backward_bayesian_inference Documentation
+ ----------------------------------------------
+ This method inputs an adjacency matrix, a list of node names, an array of nodes to start with for generating the graph of the
+ Bayesian network, an array of evidence nodes, an array of hypothesis nodes, probabilities, a boolean for indexing into the right
+ values of the array, a boolean for multiple turbines, a parent or child calculation type (string), and a boolean for using the
+ twoTurbine case study method. We calculate Bayesian inference given the evidence and hypothesis variables.
+ This method outputs an array of inference probabilities (normalized and not).'''
+
+def backward_bayesian_inference(adjacency_matrix, nodeNames, indicator, evidence, hypothesis, probabilities, start_bool = True, multi = False, poc="parent", twoTurbine = False):
+    # Calculate Bayesian inference for hypothesis = True and hypothesis = False
+    # print("--- First Run of Bayesian Inference ----------------")
+    # print("Before bn", probabilities.shape)
+    printing = False
+    if len(evidence) > 0 and len(hypothesis) > 0:
+        printing = False
+
+    pt1, hnd1 = bayesian_inference(adjacency_matrix, nodeNames, indicator, evidence, hypothesis, probabilities, True, printing=printing, multi = multi, poc=poc, twoTurbine=twoTurbine)
+    # print()
+    # print("--- Second Run of Bayesian Inference ---------------")
+    if poc == "parent":
+        pt2, hnd2 = bayesian_inference(adjacency_matrix, nodeNames, indicator, evidence, hypothesis, probabilities, False, printing=printing, multi = multi, poc=poc, twoTurbine=twoTurbine)
+    # print("pt1", pt1[:, 0])
+    # print("pt2", pt2[:, 0])
+    # Choose the correct indexing value
+    index = 1
+    if start_bool:
+        index = 0
+
+    if multi:
+        return [hnd1[0][0]/(hnd1[0][0]+hnd1[0][1]), hnd1[0][1]/(hnd1[0][0]+hnd1[0][1])], [0,0]
+
+    # Depending on what our evidence and hypothesis nodes are, index into the correct values
+    if poc == "parent" and 0 in evidence:
+        p_true = pt1[:, 0][index]
+        p_false = pt2[:, 0][index]
+
+    elif poc == "parent" and 0 in hypothesis:
+        p_true = pt1[:, 0][0] + pt2[:, 0][0]
+        p_false = pt1[:, 0][1] + pt2[:, 0][1]
+
+    elif poc == "child":
+        if hnd1.shape[0] == 1:
+            p_true = hnd1[0][0]
+            p_false = hnd1[0][1]
+
+        else: # If there are multiple nodes in the hypothesis, multiply all combinations together to get a normalized value
+            print("Starting hypothesis for-loops...")
+            p_false = 0
+            for iteration in range(2 ** hnd1.shape[0]):
+                p_false_mult = 1
+                for node in range(hnd1.shape[0]):
+                    p_false_mult *= hnd1[node][int(iteration / (2 ** node)) % 2]
+                if iteration == 0:
+                    p_true = p_false_mult
+                else:
+                    p_false += p_false_mult
+                if (iteration) % 1000000000000 == 0:
+                    print("Percent done:", iteration/(2 ** hnd1.shape[0]) * 100)
+            print("Percent done:", 100)
+    else:
+        p_true = np.sum(pt1[:, 0])
+        p_false = np.sum(pt2[:, 0])
+
+    probability_distribution = [p_true/(p_true + p_false), p_false/(p_true + p_false)] # Normalize the probability distribution of the event happening
+
+    return probability_distribution, [p_true, p_false] # Return the normalized and unnormalized probability distributions
+
+
+
+'''
+# ----------- For running the code: feel free to uncomment -------------------
+arr, nodeNames = excel2Matrix("failureData.xlsx", "bigMatrix")
+#random.seed(18)
+start = 30 #np.random.randint(0, arr.shape[0])
+probabilities = np.array([0.0195, 0.0195, 0.013625, 0.0055, 0.0175, 0.2075, 0.001, 0.001, 0.001, 0.093185, 0.001, 0.001,
+                          0.027310938, 0.033968125, 0.033968125, 0.01375, 0.01375, 0.01375, 0.01375, 0.01375, 0.01375,
+                          0.0205, 0.0205, 0.02, 0.01, 0.01, 0.233, 0.288, 0.543374, 0.1285, 0.01, 0.01, 0.01, 0.015, 0.0155,
+                          0.015, 0.0155, 0.015, 0.0155, 0.015, 0.33, 0.025, 0.025, 0.025, 0.025, 0.025, 0.105]) #0.01375,
+probabilities = np.reshape(probabilities, (arr.shape[0], 1))
+start = 44 #np.random.randint(1, arr.shape[0]+1)
+single_simulation([start], arr, nodeNames)
+single_simulation([start], arr, nodeNames, update=True)
+
+num_iterations = 100
+plot = True
+start = 11 #np.random.randint(1, arr.shape[0]+1)
+monte_carlo_sim(num_iterations, plot, [start], arr, nodeNames, rand_seed=False, mid_point=False)'''
+
+
+
+
+'''# ----------- For running the code: feel free to uncomment -------------------
+adjacency_matrix, nodeNames = excel2Matrix("ExcelFiles/failureData.xlsx", "bigMatrix")
+probabilities = np.array([0.0195, 0.0195, 0.013625, 0.0055, 0.0175, 0.2075, 0.001, 0.001, 0.001, 0.093185, 0.001, 0.001,
+                          0.027310938, 0.033968125, 0.033968125, 0.01375, 0.01375, 0.01375, 0.01375, 0.01375, 0.01375,
+                          0.0205, 0.0205, 0.02, 0.01, 0.01, 0.233, 0.288, 0.543374, 0.1285, 0.01, 0.01, 0.01, 0.015, 0.0155,
+                          0.015, 0.0155, 0.015, 0.0155, 0.015, 0.33, 0.025, 0.025, 0.025, 0.025, 0.025, 0.105]) #0.01375,
+probabilities = np.reshape(probabilities, (adjacency_matrix.shape[0], 1))
+targets = [44]
+starts = [16, 33, 35, 36]
+
+# C, D = bayesian_table(adjacency_matrix, 16, True, nodeNames, pvs = True, prob_vals=probabilities, mult_turbine = False)
+# A, B = backward_bayesian_inference(adjacency_matrix, nodeNames, [0], [0], [44], probabilities, start_bool = True)
+# print(B)
+write_bayesian_probabilities(adjacency_matrix, nodeNames, probabilities, mult_turbine = True, midpoint = True, num_iterations=1)'''
\ No newline at end of file
diff --git a/failure/failureProbabilities.xlsx b/failure/failureProbabilities.xlsx
new file mode 100644
index 00000000..0b960e89
Binary files /dev/null and b/failure/failureProbabilities.xlsx differ
diff --git a/failure/graphBuilder.py b/failure/graphBuilder.py
new file mode 100644
index 00000000..451bb184
--- /dev/null
+++ b/failure/graphBuilder.py
@@ -0,0 +1,488 @@
+import pandas as pd
+import numpy as np
+import networkx as nx
+import matplotlib.pyplot as plt
+
+''' graphBuilder---------------------------------------------------------------------------------------------------
+
+ ****************************** Last Updated: 19 February 2024 ******************************
+
+ Methods:
+ 1) excel2Matrix: inputs fileName, sheetName --> outputs adjacency matrix and name array
+
+ 2) matrix2Graph: inputs adjacency matrix, name of nodes, boolean of nodeLabels --> outputs graph and adjacency
+ matrix
+
+ 3) export2graphml: inputs G, string (file name) --> writes graphML file with inputted file name (no return)
+
+ 4) get_node_dict: inputs adjacency matrix and name of nodes --> outputs array containing dictionaries with each
+ entry containing the node's name, number, and whether it is an effect or mode.
+
+ 5) get_node_array: inputs adjacency matrix and name of nodes --> outputs array with dictionary entries of the
+ names of the nodes
+
+ 6) get_graph_edges: inputs adjacency matrix, string indicating type of output desired, and threshold --> outputs
+ an array of edges
+
+ 7) make_binary: inputs adjacency matrix and float threshold --> outputs binarized adjacency matrix
+
+ 8) plot_graph: inputs graph and type (str) --> no output (plots graph)
+
+ 9) max_degrees: inputs adjacency matrix, names of nodes, threshold for binarization of adjacency matrix, boolean
+ for showing names --> outputs tuples of max out degree, max in degree, and max overall degree with the nodes that
+ have these max degrees
+
+10) getProbabilities: inputs fileName, sheetName --> outputs probabilities and name array
+
+11) diagonal_nodes: inputs adjacency matrix --> outputs diagonal matrix of node numbers
+
+12) cosine_similarity: inputs vector one and vector two --> outputs cosine similarity of the two vectors
+
+13) make_undirected_unweighted: inputs adjacency matrix and (optional) threshold --> outputs undirected, unweighted
+ version of the adjacency matrix
+
+------------------------------------------------------------------------------------------------------------------
+------------------------------------------------------------------------------------------------------------------
+
+
+ excel2Matrix Documentation
+ -------------------------------
+ This method reads in an Excel file (already properly formatted by the user) whose contents are an adjacency
+ matrix. The sheetName input identifies which sheet in the Excel file the adjacency matrix is in. The adjacency
+ matrix (without its label column) and an array of node names are returned.'''
+
+def excel2Matrix(fileName, sheetName = None):
+    # Read in the Excel file as a dataframe
+    # (a sheet name should be supplied; with sheet_name=None pandas returns a dict of every sheet)
+    df = pd.read_excel(fileName, sheet_name=sheetName)
+
+    # Convert the data frame to a numpy array
+    arr = df.to_numpy()
+
+    # Create an array of the names of the nodes for the graph.
+    nodeNames = arr[:, 0].flatten()
+
+    # Return the adjacency matrix without labels and an array of the names of the nodes
+    return arr[:, 1:], nodeNames
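+
+# Hedged usage sketch: load the adjacency matrix shipped alongside this module.
+# The file and sheet names are assumptions taken from the commented examples in
+# failureProbabilities.py.
+#
+#   arr, nodeNames = excel2Matrix("failureData.xlsx", "bigMatrix")
+#   # arr is the weighted adjacency matrix; nodeNames[i] labels row/column i
+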
+''' matrix2Graph Documentation
+ -------------------------------
+ This method takes in an adjacency matrix, the names of the nodes, and a boolean for labeling nodes by name (True)
+ or by number (False). The output is the graph and the adjacency matrix.'''
+
+def matrix2Graph(arr, nodeNames, nodeLabels = False):
+    # Create the graph (DiGraph because the graph is directed and weighted) and add the nodes
+    G = nx.DiGraph()
+    if not nodeLabels:
+        nodeNames = range(arr.shape[0])
+    G.add_nodes_from(nodeNames)
+
+    # The following nested for loops find the edges of the graph and add them to the graph using the
+    # 'add_weighted_edges_from()' command. 'i' indexes through the rows and 'j' indexes through the columns. If
+    # there is no edge (i.e. a 0 in the adjacency matrix), then move on to the next pair of nodes. Otherwise, add
+    # the edge to the graph.
+    for i in range(arr.shape[0]):
+        for j in range(arr.shape[1]):
+            if arr[i,j] == 0:
+                continue
+            else:
+                if nodeLabels: G.add_weighted_edges_from([(nodeNames[i], nodeNames[j], arr[i,j])])
+                else: G.add_weighted_edges_from([(i, j, arr[i,j])])
+
+    # Return the graph and adjacency matrix
+    return G, arr
+
+
+
+''' export2graphML Documentation
+ -------------------------------
+ This method takes in a networkx graph and outputs a graphML file.'''
+
+def export2graphML(G, name):
+
+    # Create a name for the graphML file that you are going to write
+    filename = name + "_FOWT_graphML.graphml"
+
+    # Write the graphML file using networkx methods
+    nx.write_graphml(G, filename, encoding='utf-8', prettyprint=True, infer_numeric_types=True, named_key_ids=True, edge_id_from_attribute=None)
+    return
+
+
+
+''' get_node_dict Documentation
+ -------------------------------
+ This method takes in the adjacency matrix and the names of all the nodes. It outputs an array of node dictionaries.
+ Each entry contains the node's number, name, and 'class_id' (which refers to whether the node represents a
+ failure effect or failure mode).'''
+
+def get_node_dict(arr, nodeNames):
+    # Initialize an array for the nodes
+    nodes = []
+
+    # Iterate through each node number
+    for index in range(arr.shape[0]):
+
+        # If the number is less than 26 (value chosen based on the Excel sheets), then the node is a failure effect.
+        if index < 26:
+            class_id = 'effect'
+
+        # Otherwise, the node must be a failure mode.
+        else:
+            class_id = 'mode'
+
+        # Add a dictionary with the node's number, name, selection criteria (in this case they can all be selected),
+        # and class_id (whether they are an effect or mode).
+        nodes.append({
+            'data': {'id': str(index), 'label': nodeNames[index]},
+            'selectable': True,
+            'classes': class_id
+        })
+
+    # Return the array of dictionaries for the nodes
+    return nodes
+
+
+
+''' get_node_array Documentation
+ -------------------------------
+ This method takes in an adjacency matrix and a list of node names. It outputs an array with each entry a
+ key/value pair. Each key is the string 'label', and the values are the string names of the nodes.'''
+
+def get_node_array(arr, nodeNames):
+    # Initialize an array for the nodes
+    nodes = []
+
+    # Iterate through each node number
+    for index in range(arr.shape[0]):
+
+        # For every node, locate its name and append a dictionary with 'label' as the only key and
+        # the name just acquired as the only value.
+        nodes.append({'label': nodeNames[index]})
+
+    # Return the array of nodes
+    return nodes
+
+
+''' get_graph_edges Documentation
+ -------------------------------
+ This method takes in an adjacency matrix (array type), a string indicating the type of edge list desired, and a
+ threshold. This method outputs an array of all the edges. If the 'dict' type is indicated, then each
+ entry in the array is formatted as
+ {'data': {'id': name_of_source_and_target, 'source': name_of_source, 'target': name_of_target}}.
+ If the 'array' type is indicated, then each entry in the array is formatted as
+ [source, target].
+ Lastly, if the 'tuple' type is indicated, then each entry in the array is formatted as
+ (source, target).'''
+
+def get_graph_edges(arr, type, threshold):
+    # Initialize an array for the edges
+    edges = []
+
+    # Iterate through each row of the adjacency matrix
+    for i in range(arr.shape[0]):
+
+        # For each row, iterate through each column of the matrix
+        for j in range(arr.shape[1]):
+
+            # If the entry in the adjacency matrix is greater than the threshold, then add an edge to the array
+            # of edges. Each edge is added in the format requested by the 'type' input.
+            if arr[i, j] > threshold:
+                if type == 'dict':
+                    edges.append({'data': {'id': str(i)+str(j), 'source': str(i), 'target': str(j)},
+                                  'classes': 'red'})
+                elif type == 'array':
+                    edges.append([i,j])
+                elif type == 'tuple':
+                    edges.append((i,j))
+
+            # Otherwise, if the entry does not indicate an edge, move on to the next entry in the
+            # adjacency matrix
+            else:
+                continue
+
+    # Return the array of edges
+    return edges
+
+
+
+''' make_binary Documentation
+ -------------------------------
+ This method takes in an adjacency matrix (array type) and a threshold value (float type). It outputs a
+ new adjacency matrix such that all the entries are either 0 or 1.'''
+
+def make_binary(arr, threshold = 0):
+    # Copy the original adjacency matrix so that the caller's matrix is not modified in place
+    arr_altered = arr.copy()
+
+    # Iterate through each row of the matrix
+    for i in range(0, arr_altered.shape[0]):
+
+        # In each row, iterate through each column of the matrix
+        for j in range(arr_altered.shape[0]):
+
+            # If the entry is greater than the indicated threshold, set the entry of the new adjacency
+            # matrix equal to 1
+            if arr[i,j] > threshold:
+                arr_altered[i,j] = 1
+
+            # Otherwise, set the entry equal to 0
+            else:
+                arr_altered[i,j] = 0
+
+    # Return the new adjacency matrix
+    return arr_altered
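+
+# Illustrative sketch: entries strictly greater than the threshold become 1,
+# everything else becomes 0. With the default threshold of 0:
+#
+#   make_binary(np.array([[0.0, 0.7], [0.2, 0.0]]))
+#   # -> array([[0., 1.], [1., 0.]])
+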
+''' plot_graph Documentation
+ -------------------------------
+ This method takes in a graph, the type of plot desired, the index separating effects from modes, and the node
+ names, then prints a plot of the graph.'''
+
+def plot_graph(G, type, effects_mark, nodeNames):
+
+    # This type places the failure effects on one side and the failure modes on the other in vertical columns
+    if type == "bipartite":
+        pos = nx.bipartite_layout(G, nodeNames[:effects_mark]) # positions for all nodes
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[effects_mark:], node_color="#fabc98", edgecolors="#c89679", node_shape="s")
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[:effects_mark], node_color="#98c5ed", edgecolors="#799dbd", node_shape="s")
+
+        # This for loop places labels outside of the nodes for easier visualization
+        for i in pos:
+            x, y = pos[i]
+            if x <= 0:
+                plt.text(x-0.075,y,s=i, horizontalalignment='right')
+
+            else:
+                plt.text(x+0.075,y,s=i, horizontalalignment='left')
+
+        #nx.draw_networkx_labels(G, pos, horizontalalignment='center')
+        nx.draw_networkx_edges(G, pos)
+        plt.box(False)
+        plt.show()
+        return
+
+    # This type draws all the nodes in a circle
+    elif type == "circular":
+        pos = nx.circular_layout(G) # positions for all nodes
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[:effects_mark], node_color="#98c5ed")
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[effects_mark:], node_color="#fabc98")
+        nx.draw_networkx_labels(G, pos)
+        nx.draw_networkx_edges(G, pos)
+        plt.box(False)
+        plt.show()
+        return
+
+    # This type attempts to make all the edges the same length
+    elif type == "kamada_kawai":
+        pos = nx.kamada_kawai_layout(G) # positions for all nodes
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[:effects_mark], node_color="#98c5ed")
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[effects_mark:], node_color="#fabc98")
+        nx.draw_networkx_labels(G, pos)
+        nx.draw_networkx_edges(G, pos)
+        plt.box(False)
+        plt.show()
+        return
+
+    # This type attempts to have non-intersecting edges. This only works for planar graphs.
+    elif type == "planar":
+        pos = nx.planar_layout(G) # positions for all nodes
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[:effects_mark], node_color="#98c5ed")
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[effects_mark:], node_color="#fabc98")
+        nx.draw_networkx_labels(G, pos)
+        nx.draw_networkx_edges(G, pos)
+        plt.box(False)
+        plt.show()
+        return
+
+    # This type plots the graph in a random configuration
+    elif type == "random":
+        pos = nx.random_layout(G) # positions for all nodes
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[:effects_mark], node_color="#98c5ed")
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[effects_mark:], node_color="#fabc98")
+        nx.draw_networkx_labels(G, pos)
+        nx.draw_networkx_edges(G, pos)
+        plt.box(False)
+        plt.show()
+        return
+
+    # This type uses the eigenvectors of the graph Laplacian in order to show possible clusters of nodes
+    elif type == "spectral":
+        pos = nx.spectral_layout(G) # positions for all nodes
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[:effects_mark], node_color="#98c5ed")
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[effects_mark:], node_color="#fabc98")
+        nx.draw_networkx_labels(G, pos)
+        nx.draw_networkx_edges(G, pos)
+        plt.box(False)
+        plt.show()
+        return
+
+    # This type treats nodes as repelling objects and edges as springs (causing them to move in the simulation)
+    elif type == "spring":
+        pos = nx.spring_layout(G) # positions for all nodes
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[:effects_mark], node_color="#98c5ed")
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[effects_mark:], node_color="#fabc98")
+        nx.draw_networkx_labels(G, pos)
+        nx.draw_networkx_edges(G, pos)
+        plt.box(False)
+        plt.show()
+        return
+
+    # This type places nodes in concentric circles
+    elif type == "shell":
+        pos = nx.shell_layout(G) # positions for all nodes
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[:effects_mark], node_color="#98c5ed")
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[effects_mark:], node_color="#fabc98")
+        nx.draw_networkx_labels(G, pos)
+        nx.draw_networkx_edges(G, pos)
+        plt.box(False)
+        plt.show()
+        return
+
+    # This type places nodes in a spiral layout
+    elif type == "spiral":
+        pos = nx.spiral_layout(G) # positions for all nodes
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[:effects_mark], node_color="#98c5ed")
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[effects_mark:], node_color="#fabc98")
+        nx.draw_networkx_labels(G, pos)
+        nx.draw_networkx_edges(G, pos)
+        plt.box(False)
+        plt.show()
+        return
+
+    # This type places nodes in vertical layers determined by their 'subset' attribute
+    elif type == "multipartite":
+
+        pos = nx.multipartite_layout(G) # positions for all nodes
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[:effects_mark], node_color="#98c5ed")
+        nx.draw_networkx_nodes(G, pos, nodelist=nodeNames[effects_mark:], node_color="#fabc98")
+        nx.draw_networkx_labels(G, pos)
+        nx.draw_networkx_edges(G, pos)
+        plt.box(False)
+        plt.show()
+        return
+
+    # Plot the graph with networkx and matplotlib using the default algorithm
+    nx.draw(G, with_labels=True, font_weight='bold')
+    plt.show()
+    return
+
+
+
+''' max_degrees Documentation
+ -------------------------------- + This method takes in an adjacency matrix, the names of the nodes, a threshold for binarization + of the adjacency matrix, and a boolean for labeling the nodes with names (True) or numbers (False). + The output consists of three tuples: + 1. (the nodes with the highest out degree, value of the highest out degree) + 1. (the nodes with the highest in degree, value of the highest in degree) + 1. (the nodes with the highest overall degree, value of the highest overall degree)''' + +def max_degrees(arr, nodeNames, threshold = 0, name = False): + + # Create copy of the adjacency matrix so that we can alter it without losing information about + # our original adjacency matrix. + arr_altered = arr + + # Binarization of adjacency matrix. The threshold determines the cutoff for what will be labeled + # as a 1 versus a 0. Anything above the threshold will be a 1, and anything below the threshold + # will be set to 0. + for i in range(0, arr_altered.shape[0]): + for j in range(arr_altered.shape[0]): + if arr[i,j] > threshold: + arr_altered[i,j] = 1 + else: + arr_altered[i,j] = 0 + + # Calculating out degrees and the maximum of the out degrees + out_degrees = np.sum(arr_altered, axis=1) + max_out = np.where(out_degrees == max(out_degrees)) + + # Calculating in degrees and the maximum of the in degrees + in_degrees = np.sum(arr_altered, axis=0) + max_in = np.where(in_degrees == max(in_degrees)) + + # Calculating overall degrees and the maximum of the overall degrees + degrees = out_degrees + in_degrees + max_deg = np.where(degrees == max(degrees)) + + # If the user chooses to list the nodes by their proper name (rather than with numbers), then + # we index into the array of node names, nodeNames, to find the names of these nodes. We then + # return the three tuples about maximum out degree, in degree, and overall degree. + if name: + out_name = nodeNames[max_out[0].tolist()] + in_name = nodeNames[max_in[0].tolist()] + deg_name = nodeNames[max_deg[0].tolist()] + return (out_name, max(out_degrees)), (in_name, max(in_degrees)), (deg_name, max(degrees)) + + # Otherwise, if the user chooses not to label nodes with their proper names, then we return the + # three tuples about maximum out degree, in degree, and overall degree. Rather than listing the + # names of the nodes, their corresponding numbers are listed. + return (max_out, max(out_degrees)), (max_in, max(in_degrees)), (max_deg, max(degrees)) + + + +''' getProbabilities Documentation + ------------------------------- + This method inputs an Excel file and reads the probabilities from the file. We return an array of these + probabilities and an array of the node names (indexed the same as the probability array).''' + +def getProbabilities(fileName, sheetName = None): + df = pd.read_excel(fileName, sheet_name=sheetName) + + # Convert the data frame to a numpy array + arr = df.to_numpy() + + # Create an array of the names of the nodes for the graph. + nodeNames = arr[:, 0].flatten() + + # return the adjacency matrix without labels and an array of the names of the nodes + return arr[:, 1], nodeNames + + +'''diagonal_nodes Documentation + -------------------------------- + This method takes in an adjacency matrix and outputs a diagonal matrix. 
For an adjacency matrix of length k, the
+    outputted diagonal matrix places the values 1 through k on the diagonal.'''
+
+def diagonal_nodes(arr):
+    # Return the diagonal matrix with i+1 in the i,i spot and zero elsewhere
+    return np.diag([i+1 for i in range(arr.shape[0])])
+
+'''cosine_similarity Documentation
+    -----------------------------------
+    This method takes in two vectors and outputs the cosine similarity between them. In linear algebra, cosine
+    similarity is defined as the measure of how much two vectors are pointing in the same direction. It can also
+    be thought of as the cosine of the angle between the two vectors.'''
+
+def cosine_similarity(v1, v2):
+    # Find the dot product between the two vectors inputted
+    dot_prod = np.transpose(v1) @ v2
+
+    # Calculate and return the cosine similarity of the two vectors
+    return (dot_prod) / ((np.sqrt(np.sum(np.square(v1))) * np.sqrt(np.sum(np.square(v2)))))
+
+
+
+''' make_undirected_unweighted Documentation
+    ---------------------------------------------
+    This method takes in an adjacency matrix and threshold. It outputs an adjusted adjacency matrix that
+    corresponds to the undirected and unweighted graph of the one inputted.'''
+
+def make_undirected_unweighted(arr, threshold = 0):
+    # Make sure that the array is a numpy array
+    arr = np.array(arr)
+
+    # Add the array to the transpose of itself. This makes the graph undirected.
+    arr = arr + arr.T
+
+    # Binarize the matrix with make_binary (keeping the returned array). This makes the graph unweighted.
+    arr = make_binary(arr, threshold)
+
+    # Since the array now only consists of zeros and ones, make sure that it is of integer type
+    arr = arr.astype(int)
+
+    # Return the adjusted matrix
+    return arr
\ No newline at end of file
diff --git a/failure/searchMethods.py b/failure/searchMethods.py
new file mode 100644
index 00000000..b4fbb357
--- /dev/null
+++ b/failure/searchMethods.py
@@ -0,0 +1,429 @@
+import numpy as np
+import networkx as nx
+import matplotlib.pyplot as plt
+from failure.graphBuilder import *
+
+''' searchMethods--------------------------------------------------------------------------------------------------
+
+   ****************************** Last Updated: 10 April 2024 ******************************
+
+ Methods:
+  1) breadth_first_child: inputs adjacency matrix, names of nodes, start node --> outputs tree, adjacency matrix,
+  dictionary of nodes and generations, list of 'effect' nodes, list of 'mode' nodes
+
+  2) breadth_first_parent: inputs adjacency matrix, names of nodes, start node --> outputs tree, adjacency matrix,
+  dictionary of nodes and generations, list of 'effect' nodes, list of 'mode' nodes
+
+  3) draw_bfs_multipartite: inputs adjacency matrix, list of node names, start node --> plots breadth first tree.
+  Nothing is returned.
+
+  4) breadth_first_multi: adjacency matrix, list of node names, starting node, type of breadth first search -->
+  outputs tree, adjacency matrix, dictionary of nodes, arrays for effects and modes, and array of nodes
+
+--------------------------------------------------------------------------------------------------------------------
+--------------------------------------------------------------------------------------------------------------------
+
+
+  breadth_first_child Documentation
+  -----------------------------------
+  This method takes in an adjacency matrix, list of node names, and an integer indicating the starting node. We
+  traverse through the graph.
+  For each generation, starting from the source node, we find and add all the children
+  of the nodes in this generation, unless the node has already been placed in the tree. As such, we create a tree.
+  This method returns this tree, the tree's adjacency matrix, a dictionary of nodes, and arrays for effects and modes.
+
+  --> Note: a tree is a graph with no cycles or loops. It does not have to be directed, but it is acyclic.'''
+
+def breadth_first_child(arr, nodeNames, source):
+    # Initialize a new digraph for the tree we are going to make and add all the nodes (we will determine edges later)
+    G = nx.DiGraph()
+
+    # Initialize effects and modes arrays for plotting purposes
+    effects = []
+    modes = []
+
+    # Add the source node to the graph
+    G.add_node(nodeNames[source - 1])
+
+    # Determine if the source node is an effect or a mode, and add it to the correct array
+    if source < 27: effects.append(nodeNames[source - 1])
+    elif source <= 47: modes.append(nodeNames[source - 1])
+
+    '''if source < 49: modes.append(nodeNames[source - 1])
+    else: effects.append(nodeNames[source - 1])'''
+
+    # Note that we are changing the numerical names of the nodes so that the values of nodes are from 1 to 47 rather
+    # than from 0 to 46. This is so we can find all the children of the nodes. Using the 0-46 range poses a problem
+    # since we do not know if a zero indicates no relation or that the 0 node is a child of the node we are looking at.
+    # We binarize the adjacency matrix so that relationships either exist or not (rather than having an intensity)
+    adj = make_binary(arr, 0.5).astype(int)
+
+    # Create an array of booleans such that the ith entry indicates if the node has already been visited. Nodes
+    # that have been visited are set to TRUE.
+    nodeList = np.reshape(np.repeat(False, arr.shape[0]), (arr.shape[0], 1))
+
+    # Create a diagonal matrix such that the value of the i,i entry is i+1, referencing the node with name i+1
+    nodes = diagonal_nodes(adj)
+    # print("nodes", nodes.shape)
+
+    # Visit the source node
+    nodeList[source-1] = True
+
+    # Add the source node to the queue. We use a queue to iterate through the nodes. This allows us to search through
+    # the graph generation by generation rather than following one specific path at a time.
+    queue = [[source, 0]]
+
+    # Initialize a dictionary that will tell us what generation each node is in. Label the generation of the source node
+    # as zero.
+    gens = {nodeNames[source-1]: {"layer": 0}}
+
+    # Continue while there are still nodes in the queue (reference algorithms for a breadth first search for more info)
+    while len(queue) > 0:
+        # Set the node we are looking at equal to the node next in line in the queue
+        current = queue[0]
+
+        # Determine the children of the current node.
+        # Proof/Reason Why This Works --> Using the unweighted adjacency matrix, we can find the children
+        # by multiplying the row of the adjacency matrix corresponding to the current node by the diagonal matrix of
+        # node names. If the jth node is a child of the current node, then there is a 1 in the j-1 column of the
+        # current row. When multiplied by the diagonal matrix, this 1 will be multiplied by the j-1 column of the
+        # diagonal matrix. Since the only non-zero entry in the j-1 column of the diagonal matrix is j (located on
+        # the diagonal), the entry in the j-1 column of the returned vector will be j. However, if the jth node is
+        # not a child of the current node, then there is a zero in the j-1 column of the current row of the adjacency
+        # matrix. When multiplied by the diagonal matrix, this zero is multiplied by every entry in the j-1 column
+        # of the diagonal matrix. So, the j-1 column of the returned vector is zero.
+
+        children_bool = adj[current[0]-1] @ nodes # vector of zeros and child names (numerical names)
+        children = children_bool[np.nonzero(children_bool)] # list of just the child names (numerical names)
+
+        for child in children: # For every child of the current node that was found above...
+            # Process the child if it has not been visited yet, or if its recorded generation comes after the
+            # current node's generation:
+            if nodeList[child - 1] == False or gens[nodeNames[child - 1]]['layer'] > current[1]:
+
+                # Add the child to the graph
+                G.add_node(nodeNames[child-1])
+
+                # Determine if the child is an effect or mode and add it to the correct array
+                if child < 27: effects.append(nodeNames[child - 1])
+                elif child <= 47: modes.append(nodeNames[child - 1])
+                '''if child < 49: modes.append(nodeNames[child - 1])
+                else: effects.append(nodeNames[child - 1])'''
+
+                # Add an edge between the current node and child in the tree we are building
+                G.add_edge(nodeNames[current[0]-1], nodeNames[child-1])
+
+                queue.append([child, current[1]+1]) # Append the child to the queue
+                nodeList[child - 1] = True # Change the status of the child to say we have visited it
+                gens.update({nodeNames[child-1]: {"layer": current[1]+1}}) # Add child to dictionary of nodes
+
+        # Remove the current node from the queue
+        queue = queue[1:]
+
+    # Return the tree we made, its adjacency matrix, the dictionary of nodes, the effects array, and the modes array
+    return G, nx.to_numpy_array(G), gens, effects, modes
+
+
+
+''' breadth_first_parent Documentation
+  ---------------------------------------
+  This method takes in an adjacency matrix, list of node names, and an integer indicating the starting node. We
+  traverse through the graph. For each generation, starting from the source node, we find and add all the parents
+  of the nodes in this generation, unless the node has already been placed in the tree. As such, we create a tree.
+  This method returns this tree, the tree's adjacency matrix, a dictionary of nodes, and arrays for effects and modes.'''
+
+def breadth_first_parent(arr, nodeNames, source):
+    # Initialize a new digraph in networkx for the tree we are going to make and add all the nodes (we will determine edges later)
+    G = nx.DiGraph()
+
+    # Initialize effects and modes arrays for plotting purposes
+    effects = []
+    modes = []
+    names_of_nodes = []
+
+    # Add the source node to the graph
+    G.add_node(nodeNames[source - 1])
+    names_of_nodes.append(nodeNames[source - 1])
+
+    # Determine if the source node is an effect or a mode, and add it to the correct array
+    '''if source < 27: effects.append(nodeNames[source - 1])
+    else: modes.append(nodeNames[source - 1])'''
+    if source < 49: modes.append(nodeNames[source - 1])
+    else: effects.append(nodeNames[source - 1])
+
+    # Binarize the adjacency matrix
+    arr2 = arr.copy()
+    adj = make_binary(arr2).astype(int)
+
+    # Create an array of booleans such that the ith entry indicates if the node has already been visited. Nodes
+    # that have been visited are set to TRUE.
+    nodeList = np.reshape(np.repeat(False, arr.shape[0]), (arr.shape[0], 1))
+
+    # Create a diagonal matrix such that the value of the i,i entry is i+1, referencing the node with name i+1
+    nodes = diagonal_nodes(adj)
+
+    # Visit the source node
+    nodeList[source-1] = True
+
+    # Add the source node to the queue. We use a queue to iterate through the nodes. This allows us to search through
+    # the graph generation by generation rather than following one specific path at a time.
+    queue = [[source, 100]]
+
+    # Initialize a dictionary that will tell us what generation each node is in. Label the generation of the source node
+    # as 100 (generations count down as we move from a node to its parents).
+    gens = {nodeNames[source-1]: {"layer": 100}}
+
+    # Continue while there are still nodes in the queue (reference algorithms for a breadth first search for more info)
+    while len(queue) > 0:
+        # Set the node we are looking at equal to the node next in line in the queue
+        current = queue[0]
+
+        # Determine the parents of the current node.
+        parents_bool = nodes @ adj[:, current[0]-1] # vector of zeros and parent names (numerical names)
+        parents = parents_bool[np.nonzero(parents_bool)] # list of just the parent names (numerical names)
+
+        for parent in parents: # For every parent of the current node that was found above...
+            # Process the parent if it has not been visited yet, or if its recorded generation comes before the
+            # current node's generation:
+            if nodeList[parent - 1] == False or gens[nodeNames[parent - 1]]['layer'] < current[1]:
+
+                # Add the parent to the graph
+                G.add_node(nodeNames[parent-1])
+
+                # Determine if the parent is an effect or mode and add it to the correct array
+                if parent < 27: effects.append(nodeNames[parent- 1])
+                else: modes.append(nodeNames[parent - 1])
+                '''if parent < 49: modes.append(nodeNames[parent- 1])
+                else: effects.append(nodeNames[parent - 1])'''
+
+                # Add an edge between the parent and the current node in the tree we are building
+                G.add_edge(nodeNames[parent-1], nodeNames[current[0]-1])
+
+                queue.append([parent, current[1]-1]) # Append the parent to the queue
+                nodeList[parent - 1] = True # Change the status of the parent to say we have visited it
+                gens.update({nodeNames[parent-1]: {"layer": current[1]-1}}) # Add parent to dictionary of nodes
+
+        # Remove the current node from the queue
+        queue = queue[1:]
+
+    # Return the tree we made, its adjacency matrix, the dictionary of nodes, the effects array, and the modes array
+    return G, nx.to_numpy_array(G), gens, effects, modes
+
+
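+
+# A commented usage sketch for the two searches above, mirroring the call in the
+# work-in-progress block at the bottom of this file (assumes failureData.xlsx with a
+# "bigMatrix" sheet is available):
+'''
+arr, nodeNames = excel2Matrix("failureData.xlsx", "bigMatrix")
+G, tree_adj, gens, effects, modes = breadth_first_child(arr, nodeNames, 11)
+print(len(G.nodes), "failures reachable from", nodeNames[10])
+print(gens) # each node mapped to its generation, e.g. {'<node name>': {'layer': 0}, ...}
+'''
+
+
+
+''' draw_bfs_multipartite Documentation
+  ---------------------------------------
+  This method inputs an adjacency matrix, list of node names, and a start node. We use the breadth first search to
+  find generations of nodes starting from the start node. This method plots the resulting graph from the breadth
+  first search.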
+  Nothing is returned.'''
+
+def draw_bfs_multipartite(arr, nodeNames, start, type = "child", multi_turbine = False):
+    # Obtain the graph, adjacency matrix, dictionary, effects, and modes from the breadth first search methods above
+    if type == "child":
+        G, arr, gens, effects, modes = breadth_first_child(arr, nodeNames, start)
+    elif type == "parent":
+        G, arr, gens, effects, modes = breadth_first_parent(arr, nodeNames, start)
+    elif type == "multi-child":
+        G, arr, gens, effects, modes, non = breadth_first_multi(arr, nodeNames, start, "child")
+    else:
+        G, arr, gens, effects, modes, non = breadth_first_multi(arr, nodeNames, start, "parent")
+
+    # Give each node the attribute of their generation
+    nx.set_node_attributes(G, gens)
+    # print(effects)
+
+    if multi_turbine:
+        effect_colors = ["#ffd6ed", "#ffb3ba", "#ffdfba", "#ffffba", "#baffc9", "#bae1ff", "#b1adff", "#e4adff", "#e5e5e5", "#e8d9c5"]
+        mode_colors = ["#e5c0d5", "#e5a1a7", "#e5c8a7", "#e5e5a7", "#a7e5b4", "#a7cae5", "#9f9be5", "#cd9be5", "#cecece", "#d0c3b1"]
+        pos = nx.multipartite_layout(G, subset_key='layer')
+        # Color each node by the leading turbine number in its name, drawing effects as squares with the
+        # effect palette and modes as circles with the mode palette
+        for node in G.nodes:
+            # print(int(str(node)[0]))
+            if str(node) in effects:
+                nx.draw_networkx_nodes(G, pos, nodelist=[node], node_color = effect_colors[int(str(node)[0])], node_size=750, node_shape="s")
+            else:
+                nx.draw_networkx_nodes(G, pos, nodelist=[node], node_color = mode_colors[int(str(node)[0])], node_size=750)
+        nx.draw_networkx_labels(G, pos, font_size=5, verticalalignment='center_baseline')
+        nx.draw_networkx_edges(G, pos, arrowsize=20)
+        plt.box(False)
+        plt.show()
+    else:
+        # Plot the graph
+        pos = nx.multipartite_layout(G, subset_key='layer')
+        nx.draw_networkx_nodes(G, pos, nodelist=effects, node_color="#98c5ed", node_size=2700, edgecolors="#799dbd", node_shape="s")
+        nx.draw_networkx_nodes(G, pos, nodelist=modes, node_color="#fabc98", node_size=2700, edgecolors="#c89679", node_shape="s")
+        nx.draw_networkx_labels(G, pos, font_size=10, verticalalignment='center_baseline')
+        nx.draw_networkx_edges(G, pos, arrowsize=60)
+        plt.box(False)
+        plt.show()
+
+    # Nothing is returned
+    return
+
+
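+
+# A commented sketch of plotting breadth-first trees from failure 11, mirroring the calls
+# above (same failureData.xlsx / "bigMatrix" assumption as the sketch further up):
+'''
+arr, nodeNames = excel2Matrix("failureData.xlsx", "bigMatrix")
+draw_bfs_multipartite(arr, nodeNames, 11, type = "child") # descendants of failure 11
+draw_bfs_multipartite(arr, nodeNames, 11, type = "parent") # ancestors of failure 11
+draw_bfs_multipartite(arr, nodeNames, [11, 12], type = "multi-child") # multiple start nodes
+'''
+
+
+
+'''breadth_first_multi Documentation
+  -----------------------------------
+  This method takes in an adjacency matrix, list of node names, an integer indicating the starting node, and a string
+  that indicates which type of breadth first search we are conducting (child or parent). We traverse through the graph.
+  For each generation, starting from the source nodes (handles multiple start nodes), we find and add all the children/parents
+  of the nodes in this generation, unless the node has already been placed in the tree. As such, we create a tree.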
+  This method returns this tree, the tree's adjacency matrix, a dictionary of nodes, arrays for effects and modes, and array
+  of nodes (in the order they are added).'''
+
+def breadth_first_multi(arr, nodeNames, sources, type_poc):
+    # Function that determines how the value of the layer changes, depending on the type of calculation
+    def layer_fcn(layer, type_poc):
+        if type_poc == "child":
+            return layer + 1
+        elif type_poc == "parent":
+            return layer - 1
+    # Determine if the first generation comes after (child search) or before (parent search) the second generation
+    def same_layer(layer1, layer2, type_poc):
+        if type_poc == "child":
+            if layer1 > layer2:
+                return True
+            else:
+                return False
+        elif type_poc == "parent":
+            if layer1 < layer2:
+                return True
+            else:
+                return False
+
+    # Initialize a new digraph for the tree we are going to make and add all the nodes (we will determine edges later)
+    G = nx.DiGraph()
+
+    # Initialize arrays for tracking nodes
+    effects = []
+    modes = []
+    names_of_nodes = []
+    adj = make_binary(arr).astype(int)
+    nodeList = np.reshape(np.repeat(False, arr.shape[0]), (arr.shape[0], 1))
+    nodes = diagonal_nodes(adj)
+    queue = []
+    gens = {}
+
+    # Initialize value for the first layer created
+    if type_poc == "child":
+        layer_val = 0
+    else:
+        layer_val = 100
+
+    # Add the source nodes to the graph
+    for start in sources:
+        G.add_node(nodeNames[start - 1])
+
+        # Determine if the source node is an effect or a mode, and add it to the correct array
+        if start < 27: effects.append(nodeNames[start - 1])
+        else: modes.append(nodeNames[start - 1])
+
+        '''if start < 49: modes.append(nodeNames[start - 1])
+        else: effects.append(nodeNames[start - 1])'''
+
+        # Visit the source node
+        nodeList[start-1] = True
+        names_of_nodes.append(nodeNames[start - 1])
+
+        # Add the source node to the queue. We use a queue to iterate through the nodes. This allows us to search through
+        # the graph generation by generation rather than following one specific path at a time.
+        queue.append([start, layer_val]) # Start each source at layer_val so child and parent searches stay consistent
+
+        # Initialize a dictionary that will tell us what generation each node is in. Label the generation of the source
+        # nodes as layer_val.
+        gens.update({nodeNames[start-1]: {"layer": layer_val}})
+    # print("non", names_of_nodes)
+
+    # Continue while there are still nodes in the queue (reference algorithms for a breadth first search for more info)
+    while len(queue) > 0:
+        # Set the node we are looking at equal to the node next in line in the queue
+        current = queue[0]
+
+        if type_poc == "child":
+            # Determine the children of the current node.
+            children_bool = adj[current[0]-1] @ nodes # vector of zeros and child names (numerical names)
+            children = children_bool[np.nonzero(children_bool)] # list of just the child names (numerical names)
+        else:
+            children_bool = nodes @ adj[:, current[0]-1] # vector of zeros and parent names (numerical names)
+            children = children_bool[np.nonzero(children_bool)] # list of just the parent names (numerical names)
+
+        for child in children: # For every child (or parent, for a parent search) of the current node found above...
+            # print("child", child)
+            # Process the node if it has not been visited yet, or if its recorded generation comes after (child
+            # search) or before (parent search) the current node's generation:
+            if nodeList[child - 1] == False or same_layer(gens[nodeNames[child - 1]]['layer'], current[1], type_poc):
+
+                # Add the child to the graph, and to the array of node names (if it is not already there)
+                G.add_node(nodeNames[child-1])
+                if nodeNames[child - 1] not in names_of_nodes:
+                    names_of_nodes.append(nodeNames[child - 1])
+
+                # Determine if the child is an effect or mode and add it to the correct array
+                '''if child < 27: effects.append(nodeNames[child - 1])
+                else: modes.append(nodeNames[child - 1])'''
+                if child < 49: modes.append(nodeNames[child - 1])
+                else: effects.append(nodeNames[child - 1])
+
+                # Add an edge between the current node and child in the tree we are building
+                if type_poc == "child":
+                    G.add_edge(nodeNames[current[0]-1], nodeNames[child-1])
+                else:
+                    G.add_edge(nodeNames[child-1], nodeNames[current[0]-1])
+
+                queue.append([child, layer_fcn(current[1], type_poc)]) # Append the child to the queue
+                nodeList[child - 1] = True # Change the status of the child to say we have visited it
+                gens.update({nodeNames[child-1]: {"layer": layer_fcn(current[1], type_poc)}}) # Add child to dictionary of nodes
+
+        # Remove the current node from the queue
+        queue = queue[1:]
+
+    # Return the tree we made, its adjacency matrix, the dictionary of nodes, the effects array, the modes array,
+    # and the array of node names
+    return G, nx.to_numpy_array(G), gens, effects, modes, names_of_nodes
+
+
+'''# **** Below code is still being worked on *****
+
+arr, nodeNames = excel2Matrix("failureData.xlsx", "bigMatrix")
+source = 11
+G = nx.DiGraph()
+
+effects = []
+modes = []
+
+G.add_node(nodeNames[source - 1])
+
+if source < 49: modes.append(nodeNames[source - 1])
+else: effects.append(nodeNames[source - 1])
+
+adj = make_binary(arr).astype(int)
+
+nodeList = np.reshape(np.repeat(False, arr.shape[0]), (arr.shape[0], 1))
+nodes = diagonal_nodes(adj)
+
+nodeList[source-1] = True
+queue = [[source, 0]]
+gens = {nodeNames[source-1]: {"layer": 0}}
+
+while len(queue) > 0:
+    current = queue[0]
+
+    children_bool = adj[current[0]-1] @ nodes # vector of zeros and child names (numerical names)
+    children = children_bool[np.nonzero(children_bool)] # list of just the child names (numerical names)
+
+    for child in children:
+        if nodeList[child - 1] == False or gens[nodeNames[child - 1]]['layer'] > current[1]:
+            G.add_node(nodeNames[child-1])
+
+            if child < 27: effects.append(nodeNames[child - 1])
+            else: modes.append(nodeNames[child - 1])
+
+            G.add_edge(nodeNames[current[0]-1], nodeNames[child-1])
+
+            queue.append([child, current[1]+1]) # Append the child to the queue
+            nodeList[child - 1] = True # Change the status of the child to say we have visited it
+            gens.update({nodeNames[child-1]: {"layer": current[1]+1}}) # Add child to dictionary of nodes
+
+    queue = queue[1:]'''
\ No newline at end of file
diff --git a/failure/twoTurbineCaseStudy.py b/failure/twoTurbineCaseStudy.py
new file mode 100644
index 00000000..82189fed
--- /dev/null
+++ b/failure/twoTurbineCaseStudy.py
@@ -0,0 +1,76 @@
+import numpy as np
+from failure.graphBuilder import *
+from failure.searchMethods import *
+
+''' twoTurbineCaseStudy.py -----------------------------------------------------------------------------------------------
+
+   ****************************** Last Updated: 18 April 2024 ******************************
+
+ Methods:
+  1) twoTurbine_bayesian_table: input adjusted adjacency matrix, adjacency matrix, current node, list of node names,
+     adjusted list
+     of node names --> outputs list of parents, probability table
+
+-----------------------------------------------------------------------------------------------------------------------
+-----------------------------------------------------------------------------------------------------------------------
+
+
+  twoTurbine_bayesian_table Documentation
+  ------------------------------------------
+  This method inputs an adjusted adjacency matrix, regular adjacency matrix, current node (integer), array of node names (strings),
+  and adjusted node names (same indexing as adjusted adjacency matrix). We then calculate the probability table for the current node
+  given its parents in the adjusted adjacency matrix. We use the weights from the regular adjacency matrix to determine the transitional
+  probabilities between nodes. We output an array of the current node's parents and its probability table.'''
+
+
+def twoTurbine_bayesian_table(a, arr, current, nodeNames, non):
+    # arr, nodeNames = excel2Matrix("ExcelFiles/failureData.xlsx", "twoTurbines_simplified") # For debugging, feel free to uncomment
+    adj = a.copy()
+    nodes = diagonal_nodes(adj) # Diagonal matrix of the nodes' numerical names (i+1 on the diagonal)
+    adj = make_binary(adj, 0) # Binarize adjacency matrix
+    parents_bool = nodes @ adj[:, current-1] # vector of zeros and parent names (numerical names)
+    parents = list(parents_bool[np.nonzero(parents_bool)]) # list of just the parent names (numerical names)
+    prob_table = np.zeros((2 ** len(parents), len(parents) + 2)) # Initialize the array for the node's conditional probability distribution
+
+    current_index = np.where(nodeNames == non[int(current - 1)])[0][0]
+
+    for iteration in range(2 ** len(parents)): # For each combination of parents having either failed or not
+        true_false_array = [] # Initialize to record which parents have failed
+
+        for p in range(len(parents)):
+            parent_index = np.where(nodeNames == non[int(parents[p] - 1)])[0][0]
+            prob_table[iteration][p] = int(iteration / (2 ** p)) % 2 # Record whether or not the parent node has failed (a 0 entry encodes a failed parent)
+            prob_table[iteration][-2] += (1- int(iteration / (2 ** p)) % 2) * arr[parent_index][current_index] # Add the parent-to-child transition probability for each failed parent
+            true_false_array.append((int(iteration / (2 ** p)) % 2)) # Append whether or not the parent node has failed
+
+        if prob_table[iteration][-2] > 1: # If the accumulated value is greater than 1, cap the probability at 1
+            prob_table[iteration][-2] = 1
+        prob_table[iteration][-1] = 1 - prob_table[iteration][-2] # Add the probability of the node not failing to the array
+    # print(prob_table)
+    return parents, prob_table
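+
+
+# A small sketch of how the rows of prob_table above enumerate parent failure combinations:
+# bit p of each row index (int(iteration / 2**p) % 2) marks the state of parent p, so with
+# two parents the four rows cover the states (0,0), (1,0), (0,1), and (1,1).
+'''
+for iteration in range(2 ** 2):
+    print([int(iteration / (2 ** p)) % 2 for p in range(2)])
+# prints [0, 0], [1, 0], [0, 1], [1, 1]
+'''
+
+
+
+'''
+# The following code is for writing an excel file with all pair-wise probabilities. To run, copy and paste the code below into main.py and hit 'Run.'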
+ +adjacency_matrix, nodeNames = excel2Matrix("ExcelFiles/failureData.xlsx", "twoTurbines_simplified") +current = 1 +midpoint = True +num_iterations = 1 +probabilities, nodeNames = getProbabilities("ExcelFiles/failureProbabilities.xlsx", sheetName = "Conditional Probabilities (2)") +all_probs = np.zeros(adjacency_matrix.shape) +with pd.ExcelWriter("bayesian_simulations3.xlsx") as writer: + for i in range(1, len(nodeNames) + 1): + print("----------------------------------", i, "----------------------------------") + array_of_probs = np.zeros((len(nodeNames), 2)) + for j in range(1,len(nodeNames)+1): + adjacency_matrix2 = adjacency_matrix.copy() + probabilities = np.array(probabilities).copy() + A, B = backward_bayesian_inference(adjacency_matrix2, nodeNames, [i], [i], [j], probabilities, start_bool = True, multi = False, poc="child", twoTurbine = True) + array_of_probs[j-1] = A + df = pd.DataFrame(np.array(array_of_probs)) + df.to_excel(writer, sheet_name="Probabilities given "+str(i)) + all_probs[:,i-1] = array_of_probs[:,0] + df2 = pd.DataFrame(np.array(all_probs)) + df2.to_excel(writer, sheet_name="allProbs") +''' \ No newline at end of file diff --git a/failure/zzz_failureGraphs.py b/failure/zzz_failureGraphs.py new file mode 100644 index 00000000..15e0c58b --- /dev/null +++ b/failure/zzz_failureGraphs.py @@ -0,0 +1,526 @@ +import numpy as np +import networkx as nx +import matplotlib.pyplot as plt +import pandas as pd +from failureProbabilities import * +from twoTurbineCaseStudy import * +from famodel.project import Project + + +def addMoreEdges(nearby_platforms, G, Array, node, node_id, failures_c, failures_p, ids): + node_index = np.where(nodeNames == node)[0][0] + platforms = list(Array.platformList.keys()) + if len(nearby_platforms) > 1: + platforms = nearby_platforms + cables = list(Array.cableList.keys()) + all_ids = [] + combos = [] + for p in platforms: + for c in cables: + if c in p: + combos.append(str(p) + ',' + str(c)) + all_ids.append(c) + for m in Array.platformList[p]: + combos.append(str(m) + ',' + str(c)) + all_ids.append(str(m)) + print("ATTACHMENTS: ", Array.platformList[p].attachments) + for a in Array.platformList[p].anchorList: + all_ids.append(a) + all_ids = platforms+all_ids+list(combos) + for child in failures_c[node]: + child_index = np.where(nodeNames == child)[0][0] + original_ids = ids + if arr[node_index, child_index] > 1.5: + ids = all_ids + for id in ids: + if child + "\n" + str(id) in G.nodes: + G.add_edge(node + "\n" + str(node_id), child + "\n" + str(id), weight=0.001) + ids = original_ids + for parent in failures_p[node]: + parent_index = np.where(nodeNames == parent)[0][0] + original_ids = ids + if arr[parent_index, node_index] > 1.5: + ids = all_ids + for id in ids: + if parent + "\n" + str(id) in G.nodes: + G.add_edge(parent + "\n" + str(id), node + "\n" + str(node_id), weight=0.001) + ids = original_ids + return G + +def edgeReweight(G, edge): + new_weight = input("Enter weight for (" + str(edge[0].replace("\n", " ")) + ", " + str(edge[1].replace("\n", " ")) + ") (press \'Enter\' for default value)") + if new_weight=='': + new_weight=0.01 + G[edge[0]][edge[1]]['weight']=float(new_weight) + return G + +def get_y_Value(point1, point2, x): + return ((point2[1]-point1[1])/(point2[0]-point1[0]))*(x-point1[0])+point1[1] + +def get_min_max_vals(pnt1, pnt2, angle_radians): + vector = pnt2 - pnt1 + length = math.sqrt(vector[0]**2 + vector[1]**2) + if vector[0] == 0.0 and vector[1] > 0: angle = math.pi/2 + elif vector[0] == 0.0 and vector[1] < 0: angle = 
3*math.pi/2 + elif vector[0] == 0.0 and vector[1] == 0: angle = 0.0 + else: angle = math.atan(vector[1]/vector[0]) + if angle == 0 and vector[0] < 0: + angle = math.pi + new_vector1 = np.array([length * math.cos(angle - angle_radians), length * math.sin(angle - angle_radians)]) + np.array(pnt1) + new_vector2 = np.array([length * math.cos(angle + angle_radians), length * math.sin(angle + angle_radians)]) + np.array(pnt1) + if angle == math.pi and angle_radians==0: + new_vector1 = np.array([length * math.cos(angle - angle_radians), 0]) + np.array(pnt1) + new_vector2 = np.array([length * math.cos(angle + angle_radians), 0]) + np.array(pnt1) + max_x = max([new_vector1[0], new_vector2[0], pnt1[0], pnt2[0]]) + min_x = min([new_vector1[0], new_vector2[0], pnt1[0], pnt2[0]]) + max_y = max([new_vector1[1], new_vector2[1], pnt1[1], pnt2[1]]) + min_y = min([new_vector1[1], new_vector2[1], pnt1[1], pnt2[1]]) + if max_x == min_x: + if max_x < 0: max_x = 0 + elif min_x > 0: min_x = 0 + return max_x, min_x, max_y, min_y + +# Create project class instance from yaml file +Array = Project(file='famodel/OntologySample600m.yaml') + +# Create adjacency matrix from failure matrix +df = pd.read_excel("/Users/eslack/Documents/Code/ExcelFiles/failureData.xlsx", sheet_name="bMMatch") +arr = df.to_numpy()[:,1:] +nodeNames = df.to_numpy()[:, 0].flatten() + +# Get initial failure probabilities for each failure mode and effect +probabilities, array_of_probs = getProbabilities("failureProbabilities.xlsx", "Sheet3") + + + +# Determine if using the twoTurbine case study model +tT_input = input("Would you like to calculate for the twoTurbine case study? ") +if 'y' in tT_input: twoTurbine = True +else: twoTurbine = False + +# Determine angle of clashing we are interested in +angle_degree = float(input("What angle do you want to use? (in degrees) ")) +angle_radians = angle_degree/360 * math.pi * 2 + +# Determine what the nearby platforms are (True ==> all other platforms, False ==> physically close turbines) +nba_input = input("Would you like one turbine to directly affect all turbines? 
") +nba = False +if 'y' in nba_input.lower(): + nba = True + + + +# Initialize and create the dictionaries of the children, parents, and probabilities for each failure +failures_c = {} +failures_p = {} +probability_dict = {} + +for i in range(arr.shape[0]): + node_children = [] + node_parents = [] + for j in range(arr.shape[1]): + if arr[i,j] > 0: + node_children.append(nodeNames[j]) + if arr[j,i] > 0: + node_parents.append(nodeNames[j]) + failures_c.update({nodeNames[i]: node_children}) + failures_p.update({nodeNames[i]: node_parents}) + probability_dict.update({nodeNames[i]: probabilities[i]}) + +# Create dictionary for each subsystem of failures +turbine = [0,1,2,3,4,5,26,27,28,29] +platform = [6,7,8,9,10,11,30,31,32] +clashing = [12,13,14] +mooring_materials = [33,34,35] +mooring = [15,16,17, 18] +connector = [36] +clump_weight = [37] +anchor = [19,20,38,39] +cable = [21,22,23,41,42, 45,46] +cable_mat = [43,44] +grid = [24,25] +buoyancy = [40] + +systems = {'turbine':nodeNames[turbine], 'platform':nodeNames[platform], 'clashing':nodeNames[clashing], 'moor_mat':nodeNames[mooring_materials], + 'mooring':nodeNames[mooring], 'connector':nodeNames[connector], 'weight':nodeNames[clump_weight], + 'anchor':nodeNames[anchor], 'cable':nodeNames[cable], 'grid':nodeNames[grid], 'cable_mat':nodeNames[cable_mat], + 'buoyancy':nodeNames[buoyancy]} + + +# Initialize graph, boolean for plotting, and list of probabilities +G = nx.DiGraph() +plot = False +probabilities = [] + +for platform in Array.platformList: + # Initialize list of mooring-mooring clashing nodea and list of mooring-cable and anchor-cable clashing nodes + mooring_clashes = [] + cable_clashes = [] + + # Determine the nearby platforms + nearby_platforms = [] + for platform2 in Array.platformList: + platform_xy = Array.platformList[platform].r + platform2_xy = Array.platformList[platform2].r + dist_btwn_turbines = math.sqrt((platform_xy[0] - platform2_xy[0])**2 + (platform_xy[1] - platform2_xy[1])**2) + if dist_btwn_turbines <= (1600 * math.sqrt(2)): + nearby_platforms.append(platform2) + if 'y' in nba_input: + nearby_platforms = [] + + # Create platform nodes + for platform_failure in systems['platform']: + if not(platform_failure + "\n" + str(platform) in list(G.nodes)): + probabilities.append(probability_dict[platform_failure]) + G.add_node(platform_failure + "\n" + str(platform)) + G = addMoreEdges(nearby_platforms, G, Array, platform_failure, platform, failures_c, failures_p, [platform]) + + # Create turbine nodes + for turbine_failure in systems['turbine']: + if not(turbine_failure + "\n" + str(platform) in list(G.nodes)): probabilities.append(probability_dict[turbine_failure]) + G.add_node(turbine_failure + "\n" + str(platform)) + G = addMoreEdges(nearby_platforms, G, Array, turbine_failure, platform, failures_c, failures_p, [platform]) + + print(Array.platformList[platform].getTopLevelEdge) + for anchor in Array.platformList[platform].anchorList: + # Create anchor nodes + for anchor_failure in systems['anchor']: + if len(Array.platformList[platform].anchorList[anchor].mooringList) > 1 and "ingle" in anchor_failure: continue + elif len(Array.platformList[platform].anchorList[anchor].mooringList) <= 1 and "hared" in anchor_failure: continue + if not(anchor_failure + "\n" + str(anchor) in list(G.nodes)): probabilities.append(probability_dict[anchor_failure]) + G.add_node(anchor_failure + "\n" + str(anchor)) + G = addMoreEdges(nearby_platforms, G, Array, anchor_failure, anchor, failures_c, failures_p, [platform, anchor]) + + for 
mooring in Array.platformList[platform].anchorList[anchor].mooringList: + # Create mooring nodes based on specific materials + for section in Array.platformList[platform].anchorList[anchor].mooringList[mooring].dd["sections"]: + for material in systems['moor_mat']: + if ((("ope" in section["type"]["material"]) and ("ire" in material)) or (("oly" in section["type"]["material"]) and ("ynth" in material)) )or (("hain" in section["type"]["material"]) and ("hain" in material)): + if not(material + "\n" + str(mooring) in list(G.nodes)): probabilities.append(probability_dict[material]) + G.add_node(material + "\n" + str(mooring)) + G = addMoreEdges(nearby_platforms, G, Array, material, mooring, failures_c, failures_p, [platform, anchor, mooring]) + + # Create other mooring nodes + for mooring_failure in systems['mooring']: + if (Array.platformList[platform].anchorList[anchor].mooringList[mooring].shared) and ("ing line non" in mooring_failure.replace("\n", " ")): continue + elif (not Array.platformList[platform].anchorList[anchor].mooringList[mooring].shared) and ("ared line" in mooring_failure.replace("\n", " ")): continue + else: + if not(mooring_failure + "\n" + str(mooring) in list(G.nodes)): probabilities.append(probability_dict[mooring_failure]) + G.add_node(mooring_failure + "\n" + str(mooring)) + G = addMoreEdges(nearby_platforms, G, Array, mooring_failure, mooring, failures_c, failures_p, [platform, anchor, mooring]) + + # Create mooring-clashing nodes + for mooring2 in Array.platformList[platform].anchorList[anchor].mooringList: + if not(mooring == mooring2): + print('not shared', mooring, mooring2) + mooring_pnt1 = Array.platformList[platform].anchorList[anchor].mooringList[mooring].rA[:2] + mooring_pnt2 = Array.platformList[platform].anchorList[anchor].mooringList[mooring].rB[:2] + mooring2_pnt1 = Array.platformList[platform].anchorList[anchor].mooringList[mooring2].rA[:2] + mooring2_pnt2 = Array.platformList[platform].anchorList[anchor].mooringList[mooring2].rB[:2] + mMaxX, mMinX, mMaxY, mMinY = get_min_max_vals(mooring_pnt1, mooring_pnt2, angle_radians) + cMaxX, cMinX, cMaxY, cMinY = get_min_max_vals(mooring2_pnt1, mooring2_pnt2, angle_radians) + x_overlap = (cMaxX < mMaxX and cMaxX > mMinX) or (cMinX > mMinX and cMinX < mMaxX) + y_overlap = (cMaxY < mMaxY and cMaxY > mMinY) or (cMinY > mMinY and cMinY < mMaxY) + x_on_top = (cMaxX <= mMaxX and cMaxX >= mMinX) and (cMinX >= mMinX and cMinX <= mMaxX) + y_on_top = (cMaxY <= mMaxY and cMaxY >= mMinY) and (cMinY >= mMinY and cMinY <= mMaxY) + if (x_overlap and y_overlap) or (x_on_top and y_on_top): + if not(systems['clashing'][0] + "\n" + str(mooring) + "," + str(mooring2) in list(G.nodes)): + probabilities.append(probability_dict[systems['clashing'][0]]) + G.add_node(systems['clashing'][0] + "\n" + str(mooring) + "," + str(mooring2)) + G = addMoreEdges(nearby_platforms, G, Array, systems['clashing'][0], str(mooring) + str(mooring2), failures_c, failures_p, [platform, anchor, mooring, mooring2]) + mooring_clashes.append(str(mooring) + str(mooring2)) + + # Assign connector failures + for connector in Array.platformList[platform].anchorList[anchor].mooringList[mooring].dd['connectors']: + for connector_failure in systems['connector']: + if not(connector_failure + "\n" + str(mooring) in list(G.nodes)): probabilities.append(probability_dict[connector_failure]) + G.add_node(connector_failure + "\n" + str(mooring)) + G = addMoreEdges(nearby_platforms, G, Array, connector_failure, mooring, failures_c, failures_p, [platform, anchor, mooring, 
connector]) + + # Create cable-clashing nodes + for cable in Array.cableList: + if platform in Array.cableList[cable].dd['platforms']: + + # Cable failures + for cable_failure in systems['cable']: + if 'disconnect' in cable_failure.replace('\n',' ').lower(): + if not(cable_failure + "\n" + str(cable) + ' ' + str(platform) in list(G.nodes)): + probabilities.append(probability_dict[cable_failure]) + G.add_node(cable_failure + "\n" + str(platform)+','+str(cable)) + G = addMoreEdges(nearby_platforms, G, Array, cable_failure, str(platform)+','+str(cable), failures_c, failures_p, [platform, anchor, mooring, cable]) + else: + if not(cable_failure + "\n" + str(cable) in list(G.nodes)): + probabilities.append(probability_dict[cable_failure]) + G.add_node(cable_failure + "\n" + str(cable)) + G = addMoreEdges(nearby_platforms, G, Array, cable_failure, cable, failures_c, failures_p, [platform, anchor, mooring, cable]) + + # Cable buoyancy modules and materials + for cable_section in Array.cableList[cable].dd['cables']: + # Create buoyancy module nodes (one node per cable) + if 'buoyancy_sections' in cable_section.dd: + n_modules = cable_section.dd['buoyancy_sections'][0]['N_modules'] + for buoy_failure in systems['buoyancy']: + if not(buoy_failure + "\n" + str(cable) in list(G.nodes)): + probabilities.append(probability_dict[buoy_failure]) + G.add_node(buoy_failure + "\n" + str(cable)) + G = addMoreEdges(nearby_platforms, G, Array, buoy_failure, cable, failures_c, failures_p, [platform, anchor, mooring, cable]) + + # Create either dynamic or static cable failure node (one of each per cable at a maximum) + if cable_section.dd['cable_type']['dynamic']: + if not(systems['cable_mat'][0] + "\n" + str(cable) in list(G.nodes)): + probabilities.append(probability_dict[systems['cable_mat'][0]]) + G.add_node(systems['cable_mat'][0] + "\n" + str(cable)) + G = addMoreEdges(nearby_platforms, G, Array, systems['cable_mat'][0], cable, failures_c, failures_p, [platform, anchor, mooring, cable]) + else: + if not(systems['cable_mat'][1] + "\n" + str(cable) in list(G.nodes)): + probabilities.append(probability_dict[systems['cable_mat'][1]]) + G.add_node(systems['cable_mat'][1] + "\n" + str(cable)) + G = addMoreEdges(nearby_platforms, G, Array, systems['cable_mat'][1], cable, failures_c, failures_p, [platform, anchor, mooring, cable]) + + # Cable clashing failures + cable_pnt1 = np.array(Array.platformList[Array.cableList[cable].dd['platforms'][0]].r) + if not ('substation' in Array.cableList[cable].dd['platforms'][1].lower()): + cable_pnt2 = np.array(Array.platformList[Array.cableList[cable].dd['platforms'][1]].r) + else: cable_pnt2 = np.array(Array.substationList[Array.cableList[cable].dd['platforms'][1]].r) + mooring_pnt1 = Array.platformList[platform].anchorList[anchor].mooringList[mooring].rA[:2] + mooring_pnt2 = Array.platformList[platform].anchorList[anchor].mooringList[mooring].rB[:2] + cMaxX, cMinX, cMaxY, cMinY = get_min_max_vals(cable_pnt1, cable_pnt2, angle_radians) + mMaxX, mMinX, mMaxY, mMinY = get_min_max_vals(mooring_pnt1, mooring_pnt2, angle_radians) + x_overlap = (cMaxX < mMaxX and cMaxX > mMinX) or (cMinX > mMinX and cMinX < mMaxX) + y_overlap = (cMaxY < mMaxY and cMaxY > mMinY) or (cMinY > mMinY and cMinY < mMaxY) + x_on_top = (cMaxX <= mMaxX and cMaxX >= mMinX) and (cMinX >= mMinX and cMinX <= mMaxX) + y_on_top = (cMaxY <= mMaxY and cMaxY >= mMinY) and (cMinY >= mMinY and cMinY <= mMaxY) + if (x_overlap and y_overlap) or ((cMaxX == mMaxX and cMinX == mMinX)and(cMaxY == mMaxY and cMinY == mMinY)): + 
for cable_clashing_failure in systems['clashing'][1:]: + if not(cable_clashing_failure + "\n" + str(cable)+str(mooring) in list(G.nodes)): + probabilities.append(probability_dict[cable_clashing_failure]) + G.add_node(cable_clashing_failure + "\n" + str(cable)+str(mooring)) + G = addMoreEdges(nearby_platforms, G, Array, cable_clashing_failure, str(cable)+str(mooring), failures_c, failures_p, [platform, cable, anchor, mooring, str(cable)+str(mooring)]) + cable_clashes.append(str(cable)+str(mooring)) + if len(cable_clashes) < 1: + if not(systems['clashing'][1] + "\n" + str(platform) + "," + str(cable) in list(G.nodes)): + probabilities.append(probability_dict[systems['clashing'][1]]) + G.add_node(systems['clashing'][1] + "\n" + str(platform) + "," + str(cable)) + G = addMoreEdges(nearby_platforms, G, Array, systems['clashing'][1], str(platform) + ',' + str(cable), failures_c, failures_p, [platform, anchor, cable, mooring, str(cable) + ' ' + str(platform)]) + if not(systems['clashing'][2] + "\n" + str(platform) + ',' + str(cable) in list(G.nodes)): + probabilities.append(probability_dict[systems['clashing'][2]]) + G.add_node(systems['clashing'][2] + "\n" + str(platform) + ',' + str(cable)) + G = addMoreEdges(nearby_platforms, G, Array, systems['clashing'][2], str(platform) + ',' + str(cable), failures_c, failures_p, [platform, anchor, cable, mooring, str(cable) + ' ' + str(platform)]) + + # If there are no specific mooring-mooring clashing, create a generic mooring-mooring clashing node for the platform + if len(mooring_clashes) < 1: + fail_list = [platform, anchor] + for a_variable in Array.platformList[platform].mooringList.keys(): + fail_list.append(a_variable) + if not(systems['clashing'][0] + "\n" + str(platform) in list(G.nodes)): + probabilities.append(probability_dict[systems['clashing'][0]]) + G.add_node(systems['clashing'][0] + "\n" + str(platform)) + G = addMoreEdges(nearby_platforms, G, Array, systems['clashing'][0], platform, failures_c, failures_p, fail_list) + + # Shared mooring line failures + for mooring in Array.platformList[platform].mooringList: + if Array.platformList[platform].mooringList[mooring].shared: + for mooring_failure in systems['mooring']: + if not twoTurbine or 'shared line' in mooring_failure.replace('\n', ' ').lower(): + if not(mooring_failure + "\n" + str(mooring) in list(G.nodes)): + probabilities.append(probability_dict[mooring_failure]) + G.add_node(mooring_failure + "\n" + str(mooring)) + G = addMoreEdges(nearby_platforms, G, Array, mooring_failure, mooring, failures_c, failures_p, [platform, mooring]) + + # Create mooring nodes based on specific materials + for section in Array.platformList[platform].mooringList[mooring].dd["sections"]: + for material in systems['moor_mat']: + if ((("ope" in section["type"]["material"]) and ("ire" in material)) or (("oly" in section["type"]["material"]) and ("ynth" in material)) )or (("hain" in section["type"]["material"]) and ("hain" in material)): + if not(material + "\n" + str(mooring) in list(G.nodes)): probabilities.append(probability_dict[material]) + G.add_node(material + "\n" + str(mooring)) + G = addMoreEdges(nearby_platforms, G, Array, material, mooring, failures_c, failures_p, [platform, mooring]) + + # Create mooring clashing nodes + for mooring2 in Array.platformList[platform].anchorList[anchor].mooringList: + if not(mooring == mooring2): + mooring_pnt1 = np.array(Array.platformList[platform].mooringList[mooring].rA[:2]) + mooring_pnt2 = np.array(Array.platformList[platform].mooringList[mooring].rB[:2]) + 
mooring2_pnt1 = np.array(Array.platformList[platform].mooringList[mooring2].rA[:2])
+                mooring2_pnt2 = np.array(Array.platformList[platform].mooringList[mooring2].rB[:2])
+                mMaxX, mMinX, mMaxY, mMinY = get_min_max_vals(mooring_pnt1, mooring_pnt2, angle_radians)
+                cMaxX, cMinX, cMaxY, cMinY = get_min_max_vals(mooring2_pnt1, mooring2_pnt2, angle_radians)
+                x_overlap = (cMaxX < mMaxX and cMaxX > mMinX) or (cMinX > mMinX and cMinX < mMaxX)
+                y_overlap = (cMaxY < mMaxY and cMaxY > mMinY) or (cMinY > mMinY and cMinY < mMaxY)
+                x_on_top = (cMaxX <= mMaxX and cMaxX >= mMinX) and (cMinX >= mMinX and cMinX <= mMaxX)
+                y_on_top = (cMaxY <= mMaxY and cMaxY >= mMinY) and (cMinY >= mMinY and cMinY <= mMaxY)
+                if (x_overlap and y_overlap) or (x_on_top and y_on_top):
+                    if not(systems['clashing'][0] in list(G.nodes)): probabilities.append(probability_dict[systems['clashing'][0]])
+                    G.add_node(systems['clashing'][0] + "\n" + str(mooring) + "," + str(mooring2))
+                    G = addMoreEdges(nearby_platforms, G, Array, systems['clashing'][0], str(mooring) + "," + str(mooring2), failures_c, failures_p, [platform, mooring, mooring2])
+                    mooring_clashes.append(str(mooring) + str(mooring2))
+
+        # Cable clashing failures
+        for cable in Array.cableList:
+            if platform in Array.cableList[cable].dd['platforms']:
+                cable_pnt1 = np.array(Array.platformList[Array.cableList[cable].dd['platforms'][0]].r)
+                if not ('substation' in Array.cableList[cable].dd['platforms'][1].lower()):
+                    cable_pnt2 = np.array(Array.platformList[Array.cableList[cable].dd['platforms'][1]].r)
+                else: cable_pnt2 = np.array(Array.substationList[Array.cableList[cable].dd['platforms'][1]].r)
+
+                if not ('substation' in mooring[0].lower()): mooring_pnt1 = np.array(Array.platformList[mooring[0]].r)
+                else: mooring_pnt1 = np.array(Array.substationList[mooring[0]].r)
+                if not ('substation' in mooring[1].lower()): mooring_pnt2 = np.array(Array.platformList[mooring[1]].r)
+                else: mooring_pnt2 = np.array(Array.substationList[mooring[1]].r)
+
+                # Compute the bounding extents first so that the overlap checks below use the current
+                # cable and mooring endpoints (rather than values left over from a previous iteration)
+                cMaxX, cMinX, cMaxY, cMinY = get_min_max_vals(cable_pnt1, cable_pnt2, angle_radians)
+                mMaxX, mMinX, mMaxY, mMinY = get_min_max_vals(mooring_pnt1, mooring_pnt2, angle_radians)
+                x_overlap = (cMaxX < mMaxX and cMaxX > mMinX) or (cMinX > mMinX and cMinX < mMaxX)
+                y_overlap = (cMaxY < mMaxY and cMaxY > mMinY) or (cMinY > mMinY and cMinY < mMaxY)
+                if (x_overlap and y_overlap) or ((cMaxX == mMaxX and cMinX == mMinX)and(cMaxY == mMaxY and cMinY == mMinY)):
+                    for cable_clashing_failure in systems['clashing'][1:]:
+                        if not (systems['clashing'][1] in list(G.nodes)): probabilities.append(probability_dict[systems['clashing'][1]])
+                        G.add_node(systems['clashing'][1] + "\n" + str(mooring) + "," + str(cable))
+                        G = addMoreEdges(nearby_platforms, G, Array, systems['clashing'][1], str(mooring) + "," + str(cable), failures_c, failures_p, [platform, cable, mooring, str(mooring) + "," + str(cable)])
+                        G = addMoreEdges(nearby_platforms, G, Array, systems['clashing'][1], str(platform) + "," + str(cable), failures_c, failures_p, [platform, cable, mooring, str(platform) + "," + str(cable)])
+                        cable_clashes.append(str(cable)+str(mooring))
+
+    # Grid/substation failures
+    for grid_failure in systems['grid']:
+        if not(grid_failure in list(G.nodes)):
+            probabilities.append(probability_dict[grid_failure])
+            G.add_node(grid_failure + "\n" + "")
+            G = addMoreEdges(nearby_platforms, G, Array, grid_failure, "", failures_c, failures_p, [platform])
+
+
+
+# If interested in the twoTurbine case study, alter a few of the failure nodes
+if twoTurbine:
+    list_of_failures_to_rid = ['Tether & anchor systems array_cable10', 'Cable protection system array_cable10',
+                               'Terminations array_cable10', 'Offshore joints array_cable10']
+    for failure in list(G.nodes):
+        # Use elif so that a node matching more than one condition is only removed once
+        if 'connector' in failure.replace('\n', ' ').lower():
+            G.remove_node(failure)
+        elif 'buoyancy modules' in failure.replace('\n', ' ').lower():
+            G.remove_node(failure)
+        elif failure.replace('\n', ' ') in list_of_failures_to_rid:
+            G.remove_node(failure)
+
+
+
+# Print the number of nodes and (if the user wants) the list of nodes
+print('\nNumber of nodes -',len(G.nodes),'\n')
+user_input3 = input("Would you like to see the list of failures? ")
+if 'y' in user_input3.lower() or 'rue' in user_input3.lower():
+    for node in list(G.nodes):
+        print(node.replace("\n", " "))
+
+# Print the number of edges and (if the user wants) the list of edges
+print('\nNumber of edges -',len(G.edges),'\n')
+user_input4 = input("Would you like to see the list of edges? ")
+if 'y' in user_input4.lower() or 'rue' in user_input4.lower():
+    itervar = 0
+    for edge in list(G.edges):
+        print(edge)
+        itervar += 1
+        if (itervar + 1) % 1000 == 0: user_input45 = input("Continue? ")
+
+
+
+# If the user wants to input probabilities for specific edges, reweight edges based on the user's inputs
+user_inputs = input("Would you like to input probabilities into adjacency matrix? ")
+twoTurbine_calculationType = False
+if (user_inputs == 'y' or user_inputs == 'yes') or user_inputs == 'True':
+    twoTurbine_calculationType = True
+    for i in range(len(G.edges)):
+        edge = list(G.edges)[i]
+        if ('rift off' in edge[0].replace("\n", " ") or 'ncreased' in edge[0].replace("\n", " ")) or 'ynamics' in edge[0].replace("\n", " "):
+            G = edgeReweight(G, edge)
+        elif ('apsize' in edge[0].replace("\n", " ") or '-cable' in edge[0].replace("\n", " ")) or 'ing line non' in edge[0].replace("\n", " "):
+            G = edgeReweight(G, edge)
+        elif ('ragging' in edge[0].replace("\n", " ") or 'hain' in edge[0].replace("\n", " ")) or 'ire rope' in edge[0].replace("\n", " "):
+            G = edgeReweight(G, edge)
+        elif ('ynthetic' in edge[0].replace("\n", " ") or 'able profile' in edge[0].replace("\n", " ")) or 'ared line' in edge[0].replace("\n", " "):
+            G = edgeReweight(G, edge)
+        elif ('load on cable' in edge[0].replace("\n", " ") or 'eight' in edge[0].replace("\n", " ")):
+            G = edgeReweight(G, edge)
+
+
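+
+# Before the Bayesian run below, a tiny self-contained sketch (hypothetical numbers) of the
+# row-wise multiply-and-sum it performs: each row of a node's conditional probability table
+# is weighted by its parent's failure/non-failure probability and the rows are summed to get
+# the node's marginal failure probability.
+'''
+p_parent_fails = 0.1
+cpt = {(0,): 0.8,   # P(node fails | parent failed) -- a 0 entry encodes failure here
+       (1,): 0.05}  # P(node fails | parent did not fail)
+p_node_fails = p_parent_fails * cpt[(0,)] + (1 - p_parent_fails) * cpt[(1,)]
+print(p_node_fails) # 0.1*0.8 + 0.9*0.05 = 0.125
+'''
+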
") +if 'n' in continue_input.lower(): + quit() + + +# Bayesian network calculation +arr = nx.to_numpy_array(G) +nodeNames = np.reshape(np.array(list(G.nodes)), (len(list(G.nodes)), )) + +poc = "child" +filename = "turbineInference_" + poc + "FAModelTwoTurbine" + "_" + str(nba_input) + ".xlsx" +for start_component in range(1,arr.shape[0]+1): # Iterate through each failure mode/effect in turbine + print(start_component) + +with pd.ExcelWriter(filename) as writer: + all_probabilities = np.zeros(arr.shape) # Initialize a large array to put all the pairwise probabilities in + for start_component in range(1,arr.shape[0]+1): # Iterate through each failure mode/effect in turbine + a = arr.copy() + non = nodeNames + K, a, g, e, m, non = breadth_first_multi(a, nodeNames, [start_component], poc) # Generate tree for Bayesian network + prblts = [] # Initialize array of node probabilities (in order of appearance in graph) + for node in non: + node_index = np.where(nodeNames == node)[0][0] + prblts.append(probabilities[node_index]) # Add nodes to array of node probabilities + prblts = np.array(prblts) + + probabilitiy_table = np.zeros((2, a.shape[0])) # Initialize table of inference probabilities + nodes = diagonal_nodes(a) # Diagonal matrix of node names (numerical +1) + a = make_binary(a, 0.5) # Binarize adjacency table + + nodeNamesArray = np.array(list(K.nodes)) # Initialize array of names of nodes + nodeNamesArray = np.reshape(nodeNamesArray, (len(nodeNamesArray),)) # Make numpy array + + # Interence----------------------------------------------------------------------- + for node in range(a.shape[0]): + pts_bool = nodes @ a[:, node] # vector of zeros and child names (numerical names) + pts = pts_bool[np.nonzero(pts_bool)] #list of just the child names (numerical names) + + if len(pts) < 1: # If no parents, add probability of failure happening to the probability table + probabilitiy_table[0][node] = probabilities[np.where(nodeNames == nodeNamesArray[int(node)])[0][0]] + probabilitiy_table[1][node] = 1 - probabilities[np.where(nodeNames == nodeNamesArray[int(node)])[0][0]] + continue + + if twoTurbine_calculationType: + parents, our_table = twoTurbine_bayesian_table(a, arr, node + 1, nodeNames, non) # Calculate the probability distribution table + else: + parents, our_table = bayesian_table(a, node+1, True, nodeNames, True, prblts) + mlt_table = np.ones((our_table.shape[0],2)) # Initialize table for multiplying across rows of probability distribution table + + # Calculate Probability Table ------------------------------------------------------------ + for i in range(our_table.shape[0]): + for j in range(our_table.shape[1] - 2): + parent = int(parents[j]) + if our_table[i,j] == 0: + our_table[i,j] = probabilitiy_table[0][parent - 1] + if probabilitiy_table[0][parent - 1] == 0: + break + else: + our_table[i,j] = probabilitiy_table[1][parent - 1] + if (parent-1 == 0): # If the node's parent is the evidence, zero out the non-failing possibility + our_table[i,j] = 0 + mlt_table[i,0] *= our_table[i,j] # Multiply the probabilities across the probability distribution table + mlt_table[i,1] = mlt_table[i,0] * our_table[i, -1] # Multiple by the probability of event not failing given combination of parent failure + mlt_table[i,0] *= our_table[i, -2] # Multiple by the probability of event failing given combination of parent failure + sm_table = np.sum(mlt_table, axis = 0) #/np.sum(mlt_table) # Sum the products of probabilities across the columns + probabilitiy_table[0][node] = sm_table[0] # Update the inference 
+            probability_table[1][node] = sm_table[1]
+
+            # Print and add probability of node to table
+            print(start_component, node, " --> Probability of ", nodeNamesArray[node].replace("\n", " "), "=", sm_table)
+            index2 = np.where(nodeNames == nodeNamesArray[node])[0][0]
+            all_probabilities[0 * arr.shape[0] + start_component - 1][0 * arr.shape[0] + index2] = sm_table[0]/np.sum(sm_table)
+
+    # Write array to dataframe
+    df3 = pd.DataFrame(all_probabilities)
+    df3.to_excel(writer, sheet_name="allProbs")
+
+
+# If we want to plot the network, plot it now
+if plot:
+    nx.draw_networkx(G)
+    plt.show()
\ No newline at end of file
diff --git a/failureGraphs.py b/failureGraphs.py
new file mode 100644
index 00000000..be6d36f6
--- /dev/null
+++ b/failureGraphs.py
@@ -0,0 +1,654 @@
+import numpy as np
+import networkx as nx
+import matplotlib.pyplot as plt
+import pandas as pd
+from failure.failureProbabilities import *
+from failure.twoTurbineCaseStudy import *
+from famodel.project import Project
+
+
+class failureGraph():
+    def __init__(self, project_file):
+        # Create project class instance from yaml file
+        self.Array = Project(file=project_file)
+        self.G = nx.DiGraph()
+
+        self.critical_failures = None
+        self.criticality_type = None
+        self.imput_and_susceptibility_table = None
+        self.mean_is = None
+
+
+    def create_failureGraph(self, matrix_file, matrix_sheet, probabilities_file, probability_sheet):
+        '''Create a graph of failures based on the FAModel Project object
+        Parameters
+        ----------
+        matrix_file : string
+            Failure matrix file that encodes interaction between failure modes and effects
+        matrix_sheet : string
+            Name of sheet in Excel file to pull the failure matrix from
+        probabilities_file : string
+            File name for list of failure rates (initial probabilities) of all failures
+        probability_sheet : string
+            Name of sheet in Excel file to pull the failure probabilities from
+        '''
+
+        print("\nBegin generating failure graph...")
+        # Create adjacency matrix from failure matrix
+        df = pd.read_excel(matrix_file, sheet_name=matrix_sheet)
+        arr = df.to_numpy()[:,1:]
+        nodeNames = df.to_numpy()[:, 0].flatten()
+
+        # Get initial failure probabilities for each failure mode and effect
+        probabilities, array_of_probs = getProbabilities(probabilities_file, probability_sheet)
+        init_prob_dict = {}
+        for prob_index in range(len(probabilities)):
+            init_prob_dict.update({nodeNames[prob_index]: probabilities[prob_index]})
+
+        # Determine angle of clashing we are interested in
+        angle_degree = input("What angle do you want to use? (in degrees) ")
diff --git a/failureGraphs.py b/failureGraphs.py new file mode 100644 index 00000000..be6d36f6 --- /dev/null +++ b/failureGraphs.py @@ -0,0 +1,654 @@ +import numpy as np +import networkx as nx +import matplotlib.pyplot as plt +import pandas as pd +import math +from failure.failureProbabilities import * +from failure.twoTurbineCaseStudy import * +from famodel.project import Project + + +class failureGraph(): + def __init__(self, project_file): + # Create project class instance from yaml file + self.Array = Project(file=project_file) + self.G = nx.DiGraph() + + self.critical_failures = None + self.criticality_type = None + self.impact_and_susceptibility_table = None + self.mean_is = None + + + def create_failureGraph(self, matrix_file, matrix_sheet, probabilities_file, probability_sheet): + '''Create a graph of failures based on the FAModel Project object + Parameters + ---------- + matrix_file : string + Failure matrix file that encodes interaction between failure modes and effects + matrix_sheet : string + Name of sheet in Excel file to pull the failure matrix from + probabilities_file : string + File name for list of failure rates (initial probabilities) of all failures + probability_sheet : string + Name of sheet in Excel file to pull the failure probabilities from + ''' + + print("\nBegin generating failure graph...") + # Create adjacency matrix from failure matrix + df = pd.read_excel(matrix_file, sheet_name=matrix_sheet) + arr = df.to_numpy()[:,1:] + nodeNames = df.to_numpy()[:, 0].flatten() + + # Get initial failure probabilities for each failure mode and effect + probabilities, array_of_probs = getProbabilities(probabilities_file, probability_sheet) + init_prob_dict = {} + for prob_index in range(len(probabilities)): + init_prob_dict.update({nodeNames[prob_index]: probabilities[prob_index]}) + + # Determine angle of clashing we are interested in + angle_degree = input("What angle do you want to use? (in degrees) ") + if angle_degree == '': self.angle_radians = 0.0 + else: self.angle_radians = float(angle_degree)/360 * math.pi * 2 + + # Initialize and create the dictionaries of the children and parents + self.failures_c = {} + self.failures_p = {} + for i in range(arr.shape[0]): + node_children = [] + node_parents = [] + for j in range(arr.shape[1]): + if arr[i,j] > 0: + node_children.append(nodeNames[j]) + if arr[j,i] > 0: + node_parents.append(nodeNames[j]) + self.failures_c.update({nodeNames[i]: node_children}) + self.failures_p.update({nodeNames[i]: node_parents}) + + # Get the systems (groups of failures by component type) and list of nodes that could impact the FAModel + systems = self.get_systems(nodeNames) + impacts = self.get_impact_nodes(nodeNames) + + # Initialize the failure graph + self.G = nx.DiGraph() + + # FIRST DEGREE NODES ------------------------------------------------------------------------------------------- + for platform in self.Array.platformList: + attachments = self.Array.platformList[platform].attachments + failure_probabilities = self.Array.platformList[platform].failure_probability + platform_obj = self.Array.platformList[platform] + nearby_platforms = [] + mooring_clashes = [] + cable_clashes = [] + num_cables = [] + + # Create platform failure nodes + for platform_failure in systems['platform']: + if platform_failure in failure_probabilities.keys(): fail_prob = failure_probabilities[platform_failure] + else: fail_prob = init_prob_dict[platform_failure] + self.G.add_node(platform_failure + "\n" + str(platform), probability=fail_prob, obj=[platform_obj], impacts=impacts[platform_failure]) + self.G = self.addMoreEdges(platform_failure, platform, [platform]) + + # Create turbine failure nodes + for turbine_failure in systems['turbine']: + if turbine_failure in failure_probabilities.keys(): fail_prob = failure_probabilities[turbine_failure] + else: fail_prob = init_prob_dict[turbine_failure] + self.G.add_node(turbine_failure + "\n" + str(platform), probability=fail_prob, obj=[platform_obj], impacts=impacts[turbine_failure]) + self.G = self.addMoreEdges(turbine_failure, platform, [platform]) + + # FIRST DEGREE EDGES ------------------------------------------------------------------------------------------- + for attach1 in attachments.keys(): + attach1_name = str(attachments[attach1]['id']) + attach1_type = '' + failure_probabilities = attachments[attach1]['obj'].failure_probability + if 'mooring' in str(type(attachments[attach1]['obj'])): + if attachments[attach1]['obj'].shared: attach1_type = 'sharedmooring' + else: attach1_type = 'mooring' + elif 'cable' in str(type(attachments[attach1]['obj'])): + attach1_type = 'cable' + num_cables.append(attachments[attach1]['obj']) + + # Create mooring/cable failure nodes + for attach1_failure in systems[attach1_type]: + original_name = attach1_name + if 'connect' in attach1_failure: attach1_name = platform + attach1_name + if attach1_failure in failure_probabilities.keys(): fail_prob = failure_probabilities[attach1_failure] + else: fail_prob = init_prob_dict[attach1_failure] + self.G.add_node(attach1_failure + "\n" + attach1_name, probability=fail_prob, obj=[attachments[attach1]['obj']], impacts=impacts[attach1_failure]) + self.G = self.addMoreEdges(attach1_failure, attach1_name, [platform, attach1_name]) + attach1_name = original_name + + # Create clashing failure nodes + for attach2 in attachments.keys(): + attach2_name = str(attachments[attach2]['id']) + attach2_type = '' + clash_name = 
str(attach1_name)+str(attach2_name) + if 'mooring' in str(type(attachments[attach2]['obj'])): attach2_type = 'mooring' + elif 'cable' in str(type(attachments[attach2]['obj'])): attach2_type = 'cable' + for clash_failure in systems[(str(attach1_type)+str(attach2_type))]: + if 'shared' in attach1_type and all(abs(np.array(attachments[attach1]['obj'].rB[:2]) - np.array(attachments[attach2]['obj'].rB[:2])) < 100): reverse = True + else: reverse = False + if self.couldClash(clash_failure, attachments[attach1]['obj'], attachments[attach2]['obj'], reverse): + if clash_failure in failure_probabilities.keys(): fail_prob = failure_probabilities[clash_failure] + else: fail_prob = init_prob_dict[clash_failure] + self.G.add_node(clash_failure + "\n" + clash_name, probability=fail_prob, obj=[attachments[attach1]['obj'], attachments[attach2]['obj']], impacts=impacts[clash_failure]) + self.G = self.addMoreEdges(clash_failure, clash_name, [platform, attach1_name, attach2_name, clash_name]) + if attach1_type == 'mooring' and attach2_type == attach1_type: mooring_clashes.append(clash_failure + "\n" + clash_name) + elif ('shared' not in attach1_type) and ('shared' not in attach2_type): cable_clashes.append(clash_failure + "\n" + clash_name) + + # SUBNODES AND SUBEDGES ------------------------------------------------------------------------------------ + subcomponents = attachments[attach1]['obj'].subcomponents + component_num = 0 + for component in subcomponents: + failure_probabilities = component.failure_probability + component_num += 1 + if 'mooring' in attach1_type: + if 'type' in component.keys(): + # Create clump weight failure nodes + if 'str' in str(type(component['type'])) and 'weight' in component['type']: + component_type = 'weight' + component_name = str(attach1_name + ' ' + component['type'] + ' ' + str(component_num)) + + # Create mooring material failure nodes + elif 'dict' in str(type(component['type'])): + if 'polyester' in component['type']['material']: component_type = 'polyester' + elif 'chain' in component['type']['material']: component_type = 'chain' + elif 'rope' in component['type']['material']: component_type = 'rope' + component_name = attach1_name + ' ' + str(component['type']['name']) + + # Create connector failure nodes + else: + component_type = 'connector' + component_name = attach1_name + ' connector' + + elif 'cable' in attach1_type: + # Create dynamic cable section failure nodes + if 'dynamic' in str(type(component)): + component_type = 'dynamic' + component_name = str(component.id) + + # Create static cable section failure nodes + elif 'static' in str(type(component)): + component_type = 'static' + component_name = str(component.id) + + # Create offshore joint failure nodes + elif 'joint' in str(type(component)).lower(): + component_type = 'joints' + component_name = attach1_name + ' ' + str(component.id) + + for component_failure in systems[component_type]: + if component_failure in failure_probabilities: fail_prob = failure_probabilities[component_failure] + else: fail_prob = init_prob_dict[component_failure] + self.G.add_node(component_failure + "\n" + component_name, probability=fail_prob, obj=[component], impacts=impacts[component_failure]) + self.G = self.addMoreEdges(component_failure, component_name, [platform, attach1]) + + + # SECOND ORDER NODES ------------------------------------------------------------------------------------------- + attached_to = attachments[attach1]['obj'].attached_to + for attach1A in attached_to: + attach1A_name = str(attach1A.id) + 
failure_probabilities = attach1A.failure_probability + + # Create anchor failure nodes + if 'anchor' in str(type(attach1A)): + attach1A_type = 'anchor' + if len(attach1A.attachments) > 1: attach1A_type = 'sharedanchor' + for anchor_failure in systems[attach1A_type]: + if anchor_failure in attach1A.failure_probability.keys(): fail_prob = failure_probabilities[anchor_failure] + else: fail_prob = init_prob_dict[anchor_failure] + self.G.add_node(anchor_failure + "\n" + attach1A_name, probability=fail_prob, obj=[attach1A], impacts=impacts[anchor_failure]) + self.G = self.addMoreEdges(anchor_failure, attach1A_name, [platform, attach1_name, attach1A_name]) + + # Create edges between platforms + elif 'platform' in str(type(attach1A)): + attach1A_type = 'platform' + attach1A_name = attach1A.id + for platform_failure in systems['platform']: + self.G = self.addMoreEdges(platform_failure, platform, [platform, attach1A_name]) + + # Create substation/grid failure nodes + elif 'substation' in str(type(attach1A)): + attach1A_type = 'substation' + for grid_failure in systems['grid']: + if grid_failure in failure_probabilities.keys(): fail_prob = failure_probabilities[grid_failure] + else: fail_prob = init_prob_dict[grid_failure] + self.G.add_node(grid_failure + "\n" + attach1A_name, probability=fail_prob, obj=[attach1A], impacts=impacts[grid_failure]) + self.G = self.addMoreEdges(grid_failure, attach1A_name, [platform, attach1_name, attach1A_name]) + + # Create mooring-mooring clashing failure node if no two mooring lines likely to clash + failure_probabilities = self.Array.platformList[platform].failure_probability + if len(mooring_clashes) < 1: + if systems['mooringmooring'][0] in failure_probabilities.keys(): fail_prob = failure_probabilities[systems['mooringmooring'][0]] + else: fail_prob = init_prob_dict[systems['mooringmooring'][0]] + self.G.add_node(systems['mooringmooring'][0] + "\n" + str(platform), probability=fail_prob, obj=[platform_obj], impacts=impacts[systems['mooringmooring'][0]]) + self.G = self.addMoreEdges(systems['mooringmooring'][0], str(platform), [platform]) + + # Create cable-mooring clashing failure nodes if no cable and mooring pairing likely to clash + if len(cable_clashes) < 1: + for cable_num_obj in num_cables: + cable_num = cable_num_obj.id + for clashing_failure in systems['cablemooring']: + if clashing_failure in failure_probabilities.keys(): fail_prob = failure_probabilities[clashing_failure] + else: fail_prob = init_prob_dict[clashing_failure] + self.G.add_node(clashing_failure + "\n" + str(platform) + ' ' + str(cable_num), probability=fail_prob, obj=[platform_obj, cable_num], impacts=impacts[clashing_failure]) + self.G = self.addMoreEdges(clashing_failure, str(platform) + ' ' + str(cable_num), [platform]) + + + + def get_systems(self, nodeNames): + '''Create dictionary for each subsystem of failures + Parameters + ---------- + nodeNames : list + List of all the failure names to use to create dictionary of subsystems + ''' + # Systems and indices of corresponding failures in nodeNames + turbine = [0,1,2,3,4,5,26,27,28,29] + platform = [6,7,8,9,10,11,30,31,32] + mooringmooring = [12] + mooringcable = [13,14] + rope = [35] + polyester = [34] + chain = [33] + mooring = [15,16,17] + connector = [36] + clump_weight = [37] + anchor = [19,20,38] + cable = [21,22,23,40, 41,42, 45] + dynamic = [43] + static = [44] + grid = [24,25] + joints = [46] + buoyancy = [40] + sharedmooring = [15,16,18] + sharedanchor = [19,20,39] + + # Dictionary of systems and their failures + systems = 
{'turbine':nodeNames[turbine], 'platform':nodeNames[platform], 'mooringmooring':nodeNames[mooringmooring], 'rope':nodeNames[rope], + 'chain':nodeNames[chain],'polyester':nodeNames[polyester],'mooringcable':nodeNames[mooringcable], 'cablemooring':nodeNames[mooringcable], + 'mooring':nodeNames[mooring], 'connector':nodeNames[connector], 'weight':nodeNames[clump_weight], 'anchor':nodeNames[anchor], 'cable':nodeNames[cable], + 'grid':nodeNames[grid], 'dynamic':nodeNames[dynamic], 'static':nodeNames[static], 'buoyancy':nodeNames[buoyancy], 'cablecable': [], + 'sharedmooring':nodeNames[sharedmooring], 'sharedmooringcable':nodeNames[mooringcable], 'joints': nodeNames[joints], 'cablesharedmooring':nodeNames[mooringcable], + 'sharedmooringmooring':nodeNames[mooringmooring], 'mooringsharedmooring':nodeNames[mooringmooring], 'sharedanchor': nodeNames[sharedanchor]} + return systems + + + + def get_impact_nodes(self, nodeNames): + '''Create dictionary for each node that tells us if the node could impact the FAModel + Parameters + ---------- + nodeNames : list + List of all the failure names to use to create the dictionary of impactful nodes + ''' + # List of nodes that could impact the FAModel + could_impact = [2, 6, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 20, 21, 23] + impactful_nodes = nodeNames[could_impact] + + # Create dictionary of nodes that tells us if the node impacts the FAModel or not + impact_dict = {} + for node in nodeNames: + if node in impactful_nodes: impact_dict.update({node: True}) + else: impact_dict.update({node: False}) + return impact_dict + + + def get_critical_node(self, param): + '''Identify and return the critical failure(s) of the failure graph + Parameters + ---------- + param : string + Measurement for criticality (either initial probability, degree [in, out, or total], susceptibility, or impact) + ''' + nodalNames = np.array(list(self.G.nodes)) + self.criticality_type = param + critical_prob = [0, []] + + # Find critical failure for critical indicating the maximum initial probability + if 'prob' in param: + nodal_probabilities = nx.get_node_attributes(self.G, "probability") + for failure_node in nodal_probabilities: + if nodal_probabilities[failure_node] > critical_prob[0]: + critical_prob[0] = nodal_probabilities[failure_node] + critical_prob[1] = [failure_node] + elif nodal_probabilities[failure_node] == critical_prob[0]: + critical_prob[1].append(failure_node) + + # Find the critical failure for critical referring to the maximum degree (either in-degree, out-degree, or total-degree) + elif 'deg' in param: + out_deg, in_deg, deg = max_degrees(nx.to_numpy_array(self.G), nodalNames, threshold = 0, name = True) + if 'in' in param: + critical_prob[0] = in_deg[1] + critical_prob[1] = list(in_deg[0]) + elif 'out' in param: + critical_prob[0] = out_deg[1] + critical_prob[1] = list(out_deg[0]) + else: + critical_prob[0] = deg[1] + critical_prob[1] = list(deg[0]) + + # Find the critical failure for critical referring to the susceptibility or impact of a node + elif 'sus' in param or 'impact' in param: + max_impact, max_sus = self.get_susceptibility_and_impact() + if 'impact' in param: critical_prob = max_impact + elif 'sus' in param: critical_prob = max_sus + else: return + + self.critical_failures = critical_prob + return critical_prob
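A minimal usage sketch of these criticality options (the file and sheet names below are placeholders, not defined by this PR):

fg = failureGraph('project.yaml')   # hypothetical FAModel ontology file
fg.create_failureGraph('failureData.xlsx', 'matrix', 'failureData.xlsx', 'rates')  # assumed sheet names
print(fg.get_critical_node('probability'))     # highest initial failure probability
print(fg.get_critical_node('in degree'))       # failure with the most direct causes
print(fg.get_critical_node('susceptibility'))  # runs the Bayesian inference in get_susceptibility_and_impact()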
 + + + def get_susceptibility_and_impact(self): + '''Run Bayesian inference over the graph to find all conditional probabilities (probability of A given B for all failures A and B), then average to determine + the failure with the highest susceptibility and that with the highest impact + + Parameters + ---------- + None + ''' + print("\nStarting susceptibility and impact calculation... ") + # Create list of node names and adjacency matrix from the failure graph + nodeNamesArray = np.array(list(self.G.nodes)) + arr = nx.to_numpy_array(self.G) + + # If the user wants to input probabilities for specific edges, reweight edges based on the user's inputs + user_inputs = input("Would you like to input probabilities into adjacency matrix? ") + twoTurbine_calculationType = False + if (user_inputs == 'y' or user_inputs == 'yes') or user_inputs == 'True': + twoTurbine_calculationType = True + for i in range(len(self.G.edges)): + edge = list(self.G.edges)[i] + if ('rift off' in edge[0].replace("\n", " ") or 'ncreased' in edge[0].replace("\n", " ")) or 'ynamics' in edge[0].replace("\n", " "): self.G = self.edgeReweight(edge) + elif ('apsize' in edge[0].replace("\n", " ") or '-cable' in edge[0].replace("\n", " ")) or 'ing line non' in edge[0].replace("\n", " "): self.G = self.edgeReweight(edge) + elif ('ragging' in edge[0].replace("\n", " ") or 'hain' in edge[0].replace("\n", " ")) or 'ire rope' in edge[0].replace("\n", " "): self.G = self.edgeReweight(edge) + elif ('ynthetic' in edge[0].replace("\n", " ") or 'able profile' in edge[0].replace("\n", " ")) or 'ared line' in edge[0].replace("\n", " "): self.G = self.edgeReweight(edge) + elif ('load on cable' in edge[0].replace("\n", " ") or 'eight' in edge[0].replace("\n", " ")): self.G = self.edgeReweight(edge) + + # Ask user if they are ready to continue to Bayesian network calculations (if not, quit) + continue_input = input("Ready to continue? ") + if 'n' in continue_input.lower(): quit() + + # Bayesian network calculation + arr = nx.to_numpy_array(self.G) + nodeNames = np.reshape(np.array(list(self.G.nodes)), (len(list(self.G.nodes)), )) + all_probabilities = np.zeros(arr.shape) # Initialize a large array to put all the pairwise probabilities in + for start_component in range(1,len(list(self.G.nodes))+1): # Iterate through each failure mode/effect in turbine + a = arr.copy() + non = nodeNames + K, a, g, e, m, non = breadth_first_multi(a, nodeNames, [start_component], "child") # Generate tree for Bayesian network + prblts = [] # Initialize array of node probabilities (in order of appearance in graph) + for node in non: + prblts.append(self.G.nodes[node]['probability']) # Add nodes to array of node probabilities + prblts = np.array(prblts) + + probability_table = np.zeros((2, a.shape[0])) # Initialize table of inference probabilities + nodes = diagonal_nodes(a) # Diagonal matrix of node names (numerical +1) + a = make_binary(a, 0.5) # Binarize adjacency table + + nodeNamesArray = np.array(list(K.nodes)) # Initialize array of names of nodes + nodeNamesArray = np.reshape(nodeNamesArray, (len(nodeNamesArray),)) # Make numpy array + + # Inference----------------------------------------------------------------------- + for node in range(a.shape[0]): + pts_bool = nodes @ a[:, node] # vector of zeros and parent names (numerical names) + pts = pts_bool[np.nonzero(pts_bool)] # list of just the parent names (numerical names) + + if len(pts) < 1: # If no parents, add probability of failure happening to the probability table + probability_table[0][node] = self.G.nodes[list(self.G.nodes)[node]]['probability'] + probability_table[1][node] = 1 - self.G.nodes[list(self.G.nodes)[node]]['probability'] + continue + + if twoTurbine_calculationType: + parents, our_table = twoTurbine_bayesian_table(a, arr, node + 1, nodeNames, 
non) # Calculate the probability distribution table + else: + parents, our_table = bayesian_table(a, node+1, True, nodeNames, True, prblts) + mlt_table = np.ones((our_table.shape[0],2)) # Initialize table for multiplying across rows of probability distribution table + + # Calculate Probability Table ------------------------------------------------------------ + for i in range(our_table.shape[0]): + for j in range(our_table.shape[1] - 2): + parent = int(parents[j]) + if our_table[i,j] == 0: + our_table[i,j] = probability_table[0][parent - 1] + if probability_table[0][parent - 1] == 0: + break + else: + our_table[i,j] = probability_table[1][parent - 1] + if (parent-1 == 0): # If the node's parent is the evidence, zero out the non-failing possibility + our_table[i,j] = 0 + mlt_table[i,0] *= our_table[i,j] # Multiply the probabilities across the probability distribution table + mlt_table[i,1] = mlt_table[i,0] * our_table[i, -1] # Multiply by the probability of the event not failing given this combination of parent failures + mlt_table[i,0] *= our_table[i, -2] # Multiply by the probability of the event failing given this combination of parent failures + sm_table = np.sum(mlt_table, axis = 0) #/np.sum(mlt_table) # Sum the products of probabilities across the columns + probability_table[0][node] = sm_table[0] # Update the inference probability table with the probabilities just calculated + probability_table[1][node] = sm_table[1] + + # Print and add probability of node to table + print(start_component, node, " --> Probability of ", nodeNamesArray[node].replace("\n", " "), "=", sm_table) + index2 = np.where(nodeNames == nodeNamesArray[node])[0][0] + all_probabilities[0 * arr.shape[0] + start_component - 1][0 * arr.shape[0] + index2] = sm_table[0]/np.sum(sm_table) + + # Save the probabilities calculated + self.impact_and_susceptibility_table = all_probabilities + + # Calculate and return highest impact and susceptibility + mean_impact = np.mean(all_probabilities, axis=1) + max_impact = np.max(mean_impact) + max_impact_index = np.where(mean_impact == max_impact)[0][0] + + mean_susceptibility = np.mean(all_probabilities, axis=0) + max_susceptibility = np.max(mean_susceptibility, axis=0) + max_susceptibility_index = np.where(mean_susceptibility == max_susceptibility)[0][0] + + self.mean_is = np.hstack((np.reshape(mean_impact, (len(list(self.G.nodes)), 1)), np.reshape(mean_susceptibility, (len(list(self.G.nodes)), 1)))) + return [max_impact, nodeNames[max_impact_index]], [max_susceptibility, nodeNames[max_susceptibility_index]]
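The averaging above reads the pairwise table with rows as evidence and columns as query, so row means measure impact and column means measure susceptibility. A toy illustration (values made up):

import numpy as np

pairwise = np.array([[1.0, 0.8, 0.4],   # row i: P(other failures | failure i occurs)
                     [0.1, 1.0, 0.9],
                     [0.0, 0.2, 1.0]])
impact = pairwise.mean(axis=1)          # how strongly each failure drives the rest
susceptibility = pairwise.mean(axis=0)  # how strongly each failure is driven
print(impact.argmax(), susceptibility.argmax())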
 + + + + def couldClash(self, failure, a1, a2, reverse): + '''Determine if two lines (either mooring or cable) could clash with each other + Parameters + ---------- + failure : string + Name of failure (so that we can check if failure node already exists) + a1 : object + Either cable or mooring object for the first line + a2 : object + Either cable or mooring object for the second line + reverse : boolean + Determines orientation of the first line (True if orientation of vector needs to be reversed, False otherwise) + ''' + # If the failure node already exists (perhaps by a different name), return that the lines cannot clash + if a1 == a2 or (failure + "\n" + str(a1.id) + str(a2.id) in list(self.G.nodes) or failure + "\n" + str(a2.id) + str(a1.id) in list(self.G.nodes)): return False + + # Obtain the (x,y) coordinates of the start and end points of the lines + a1_pnt1 = np.array(a1.rA[:2]) + a1_pnt2 = np.array(a1.rB[:2]) + a2_pnt1 = np.array(a2.rA[:2]) + a2_pnt2 = np.array(a2.rB[:2]) + + # Determine the boundaries of movement of the lines (boundaries form a rectangle) + a1MaxX, a1MinX, a1MaxY, a1MinY = self.get_min_max_vals(a1_pnt1, a1_pnt2, self.angle_radians, reverse) + a2MaxX, a2MinX, a2MaxY, a2MinY = self.get_min_max_vals(a2_pnt1, a2_pnt2, self.angle_radians, False) + + # If the rectangles overlap, return TRUE (the lines can clash). Else, return FALSE (the lines cannot clash) + overlap = False + for corner_x in [a1MaxX, a1MinX]: + for corner_y in [a1MaxY, a1MinY]: + if (corner_x <= a2MaxX and corner_x >= a2MinX) and (corner_y <= a2MaxY and corner_y >= a2MinY): overlap = True + return overlap + + + + def addMoreEdges(self, node, node_id, ids): + '''Adds edges between current node and any already created nodes + Parameters + ---------- + node : string + Name of current failure + node_id : string + Name of current component that we are creating the failure for + ids : list of strings + List of other component ids for components whose failure nodes have already been created + ''' + # Create edges from current node to nodes already created (if created node is child of current node in reference matrix) + for child in self.failures_c[node]: + for id in ids: + if child + "\n" + str(id) in self.G.nodes: self.G.add_edge(node + "\n" + str(node_id), child + "\n" + str(id), weight=0.001) + + # Create edges from nodes already created to current node (if created node is parent of current node in reference matrix) + for parent in self.failures_p[node]: + for id in ids: + if parent + "\n" + str(id) in self.G.nodes: self.G.add_edge(parent + "\n" + str(id), node + "\n" + str(node_id), weight=0.001) + return self.G + + + + def edgeReweight(self, edge): + '''Ask user for new weight and set edge weight to user's input + Parameters + ---------- + edge : list of node names + Edge that we want to reweight, comprised of the two nodes that it connects + ''' + new_weight = input("Enter weight for (" + str(edge[0].replace("\n", " ")) + ", " + str(edge[1].replace("\n", " ")) + ") (press \'Enter\' for default value)") + if new_weight=='': new_weight=0.01 + self.G[edge[0]][edge[1]]['weight']=float(new_weight) + return self.G + + + + def get_min_max_vals(self, pnt2, pnt1, angle_radians, reverse): + '''Determine boundary of movement for a line (either cable or mooring) + Parameters + ---------- + pnt2 : list of floats + Point (x,y) of beginning of line + pnt1 : list of floats + Point (x,y) of end of line + angle_radians : float + Angle of movement (in radians) + reverse : boolean + Determines orientation of the first line (True if orientation of vector needs to be reversed, False otherwise) + ''' + # If the vector's orientation needs to be reversed, reverse the order of the points + if reverse: + pnt_hold = pnt1 + pnt1 = pnt2 + pnt2 = pnt_hold + + # Find the vector that represents the line and the vector's length + vector = pnt2 - pnt1 + length = math.sqrt(vector[0]**2 + vector[1]**2) + + # Find the angle (in polar coordinates) of the vector + if vector[0] == 0.0 and vector[1] > 0: angle = math.pi/2 + elif vector[0] == 0.0 and vector[1] < 0: angle = 3*math.pi/2 + elif vector[0] == 0.0 and vector[1] == 0: angle = 0.0 + else: angle = math.atan(vector[1]/vector[0]) + if angle == 0 and vector[0] < 0: angle = math.pi + if (angle > -math.pi*0.5 and angle < math.pi*0.5) and vector[0] < 0: angle += math.pi + + # Add and subtract the angle of motion (while vector is in polar coordinates) to create two new vectors and then convert them back to rectangular coordinates + new_vector1 = np.array([length * math.cos(angle - 
angle_radians), length * math.sin(angle - angle_radians)]) + np.array(pnt1) + new_vector2 = np.array([length * math.cos(angle + angle_radians), length * math.sin(angle + angle_radians)]) + np.array(pnt1) + if angle == math.pi and angle_radians==0: # Handle math.sin(math.pi) not being exactly 0 in floating point + new_vector1 = np.array([length * math.cos(angle - angle_radians), 0]) + np.array(pnt1) + new_vector2 = np.array([length * math.cos(angle + angle_radians), 0]) + np.array(pnt1) + + # Determine the bounds of the smallest rectangle which contains the original vector and the two new vectors + max_x = max([new_vector1[0], new_vector2[0], pnt1[0], pnt2[0]]) + min_x = min([new_vector1[0], new_vector2[0], pnt1[0], pnt2[0]]) + max_y = max([new_vector1[1], new_vector2[1], pnt1[1], pnt2[1]]) + min_y = min([new_vector1[1], new_vector2[1], pnt1[1], pnt2[1]]) + if max_x == min_x: + if max_x < 0: max_x = 0 + elif min_x > 0: min_x = 0 + return max_x, min_x, max_y, min_y + + + def enact_failures(self, failure): + '''Update the FAModel based on failure(s) occurring + Parameters + ---------- + failure : object + Object of failure you would like to enact + ''' + # Run simulation without failure + without_failure = self.Array + + # Run simulation with failure + with_failure = self.Array + + return without_failure, with_failure + + def update_critical_node(self, criticality_stipulation): + '''Determine and return new critical failures (presumably after the previous critical failure(s) occur) + Parameters + ---------- + criticality_stipulation : string + Specifies how to determine the new critical failures + ''' + # If multiple critical nodes allowed, give the option to use both of the above methods to find critical nodes + if ('child' in criticality_stipulation.lower() and 'global' in criticality_stipulation.lower()) or 'both' in criticality_stipulation.lower(): + clist1 = self.get_child_critical_node() + clist2 = self.get_critical_node(self.criticality_type) + self.critical_failures[0] = (clist1[0] + clist2[0])/2 + self.critical_failures[1] = clist1[1] + clist2[1] + + # Find new critical failure (such that the new critical failure must be a direct effect of the previous one) + elif 'child' in criticality_stipulation.lower(): self.critical_failures = self.get_child_critical_node() + + # Find critical node such that it doesn't have to be a direct effect of the previous critical failure + elif 'global' in criticality_stipulation.lower(): + for critical_failure in self.critical_failures[1]: + self.G.remove_node(critical_failure) + self.critical_failures = self.get_critical_node(self.criticality_type) + + return self.critical_failures + + + def get_child_critical_node(self): + '''Get the critical nodes when user desires future critical nodes be children of original critical failure(s) + Parameters + ---------- + None + ''' + # Create list of direct effects from critical failure(s) + nodalNames = np.array(list(self.G.nodes)) + critical_prob = [0, []] + critical_successors = [] + for critical_failure in self.critical_failures[1]: + node_index = np.where(nodalNames == critical_failure)[0][0] + nodes = diagonal_nodes(nx.to_numpy_array(self.G)) + a = make_binary(nx.to_numpy_array(self.G), 0) + child_bool = nodes @ a[node_index] # row of the adjacency matrix gives the node's children + children = child_bool[np.nonzero(child_bool)] + for child in children: + if not(nodalNames[int(child) - 1] in critical_successors): critical_successors.append(nodalNames[int(child) - 1]) + self.G.remove_node(critical_failure) + + # Find critical failure for critical indicating the maximum initial 
probability + if 'prob' in self.criticality_type: + nodal_probabilities = nx.get_node_attributes(self.G.subgraph(critical_successors), "probability") + for failure_node in nodal_probabilities: + if nodal_probabilities[failure_node] > critical_prob[0]: critical_prob = [nodal_probabilities[failure_node], [failure_node]] + elif nodal_probabilities[failure_node] == critical_prob[0]: critical_prob[1].append(failure_node) + + # Find the critical failure for critical referring to the maximum degree (either in-degree, out-degree, or total-degree) + elif 'deg' in self.criticality_type: + out_deg, in_deg, deg = max_degrees(nx.to_numpy_array(self.G), nodalNames, threshold = 0, name = True) + if 'in' in self.criticality_type: critical_prob = [in_deg[1], list(in_deg[0])] + elif 'out' in self.criticality_type: critical_prob = [out_deg[1], list(out_deg[0])] + else: critical_prob = [deg[1], list(deg[0])] + + # Find the critical failure for critical referring to the susceptibility or impact of a node + elif 'sus' in self.criticality_type or 'impact' in self.criticality_type: + if 'sus' in self.criticality_type: col_num = 1 + elif 'impact' in self.criticality_type: col_num = 0 + vals = np.zeros(len(critical_successors)) + for node_num, node in enumerate(critical_successors): vals[node_num] = self.mean_is[np.where(nodalNames == node)[0][0]][col_num] + critical_prob = [np.max(vals), [critical_successors[int(np.argmax(vals))]]] + + self.critical_failures = critical_prob + return critical_prob \ No newline at end of file diff --git a/famodel/OntologySample600m.yaml b/famodel/OntologySample600m.yaml index 45c32141..448f2dfa 100644 --- a/famodel/OntologySample600m.yaml +++ b/famodel/OntologySample600m.yaml @@ -1591,13 +1591,9 @@ cables: sections: # refers to a configuration in cable_configs # first entry is at end A, last entry is at end B - type: dynamic_lazy_wave1 - - connectorType: joint_1 - - type: static_1 - - connectorType: joint_1 - - type: dynamic_lazy_wave1 diff --git a/famodel/anchors/capacity_dandg.py b/famodel/anchors/capacity_dandg.py new file mode 100644 index 00000000..8ed2e99c --- /dev/null +++ b/famodel/anchors/capacity_dandg.py @@ -0,0 +1,369 @@ +# Copyright Asitha Senanayake 2020 + +import numpy as np +from matplotlib.pyplot import plot, draw, show +import matplotlib.pyplot as plt +from scipy.interpolate import interp1d +import inspect + +################################### +#### Pile Geometry and Loading #### +################################### + +def py_analysis(profile, L, D, t, E, + V, H, H_n, M, M_n, n=10, iterations=10, + print_output='Yes', convergence_tracker='Yes', loc=2): + ''' + Models a laterally loaded pile using the p-y method. The solution for + lateral displacements is obtained by solving the 4th order ODE, + EI*d4y/dz4 - F*d2y/dz2 + ky = 0 using the finite difference method. + + Assumes that EI remains constant with respect to curvature i.e. pile + material remains in the elastic region. + + Input: + ----- + profile - A 2D array of depths (m), UCS (MPa), and rock mass modulus Em (MPa) describing the rock profile; + see rock_profile() for the expected format. Always start z at 0.0. + + L - Length of pile (m) + D - Outer diameter of pile (m) + t - Wall thickness of pile (m) + E - Elastic modulus of pile material (Pa) + V - Axial force at pile head (N), vertically downwards is positive. + H - Shear force at pile head (N), shear causing clockwise rotation of pile is positive. 
+ M - Moment at pile head (N*m), moments causing tension on left side of pile are positive. + n - Number of elements (10 by default) + iterations - Number of iterations to repeat calculation in order to obtain convergence of 'y' + (A better approach is to iterate until a predefined tolerance is achieved but this requires additional + coding, so I will implement this later.) + py_model - Select which p-y model to use, 'Matlock', 'Jeanjean_2009', 'MM-1', or 'Jeanjean_etal_2017'. + (Not currently a parameter; this version always uses the Reese (1997) weak rock curves below.) + + Optional: + convergence_tracker - Track how k_secant converges to actual p-y curve at a selected node + loc - Node number at which k_secant is to be tracked (2 to n+1) + + Output: + ------ + y - Lateral displacement at each node, length = n + 5, (n+1) real nodes and 4 imaginary nodes + z - Vector of node locations along pile + ''' + + # Extract optional keyword arguments + # ls = 'x' + + # Resistance factor + gamma_f = 1.3 + + # Convert L and D to floating point numbers to avoid rounding errors + L = float(L) + D = float(D) + + # Pile geometry + I = np.pi*(D**4 - (D - 2*t)**4)/64.0 + EI = E*I + h = L/n # Element size + N = (n + 1) + 4 # (n+1) Real + 4 Imaginary nodes + + # Array for displacements at nodes, including imaginary nodes. + y = np.ones(N)*(0.01*D) # An initial value of 0.01D was arbitrarily chosen + + # Initialize and assemble array/list of p-y curves at each real node + z = np.zeros(N) + py_funs = [] + k_secant = np.zeros(N) + + for i in [0,1]: # Top two imaginary nodes + z[i] = (i-2)*h + py_funs.append(0) + k_secant[i] = 0.0 + + for i in range(2,n+3): # Real nodes + z[i] = (i - 2)*h + # Extract rock profile data + f_UCS, f_Em = rock_profile(profile) + UCS, Em = f_UCS(z[i])/gamma_f, f_Em(z[i])/gamma_f + py_funs.append(py_Reese(z[i], D, UCS, Em, t)) + k_secant[i] = py_funs[i](y[i])/y[i] + + for i in [n+3,n+4]: # Bottom two imaginary nodes + z[i] = (i - 2)*h + py_funs.append(0) + k_secant[i] = 0.0 + + # Track k_secant and current displacements + if convergence_tracker == 'Yes': + y1 = np.linspace(-2.*D,2.*D,500) + plt.plot(y1,py_funs[loc](y1)) + plt.xlabel('y (m)'), plt.ylabel('p (N/m)') + plt.grid(True) + + for j in range(iterations): + # if j == 0: print 'FD Solver started!' + y = fd_solver(n, N, h, EI, V, H, H_n, M, M_n, k_secant) + + if convergence_tracker == 'Yes': + plt.plot(y[loc], k_secant[loc]*y[loc]) + + for i in range(2,n+3): + k_secant[i] = py_funs[i](y[i])/y[i] + + if print_output == 'Yes': + print(f'y_0 = {y[2]:.3f} m') + + return y[2:-2], z[2:-2] + +################# +#### Solvers #### +################# + +def fd_solver(n, N, h, EI, V, H, H_n, M, M_n, k_secant): + ''' + Solves the finite difference equations from 'py_analysis'. This function should be run iteratively for + non-linear p-y curves by updating 'k_secant' using 'y'. A single iteration is sufficient if the p-y curves + are linear. 
+ + Input: + ----- + n - Number of elements + N - Total number of nodes + h - Element size + EI - Flexural rigidity of pile + V - Axial force at pile head + H - Shear at pile head/tip + M - Moment at pile head/tip + k_secant - Secant stiffness from p-y curves + + Output: + ------ + y_updated - Lateral displacement at each node + ''' + + from scipy import linalg + + # Initialize and assemble matrix + X = np.zeros((N,N)) + + # (n+1) finite difference equations for (n+1) real nodes + for i in range(0,n+1): + X[i,i] = 1.0 + X[i,i+1] = -4.0 + V*h**2/EI + X[i,i+2] = 6.0 - 2*V*h**2/EI + k_secant[i+2]*h**4/EI + X[i,i+3] = -4.0 + V*h**2/EI + X[i,i+4] = 1.0 + + # Curvature at pile head + X[n+1,1] = 1.0 + X[n+1,2] = -2.0 + X[n+1,3] = 1.0 + + # Shear at pile head + X[n+2,0] = -1.0 + X[n+2,1] = 2.0 - V*h**2/EI + X[n+2,2] = 0.0 + X[n+2,3] = -2.0 + V*h**2/EI + X[n+2,4] = 1.0 + + # Curvature at pile tip + X[n+3,-2] = 1.0 + X[n+3,-3] = -2.0 + X[n+3,-4] = 1.0 + + # Shear at pile tip + X[n+4,-1] = 1.0 + X[n+4,-2] = -2.0 + V*h**2/EI + X[n+4,-3] = 0.0 + X[n+4,-4] = 2.0 - V*h**2/EI + X[n+4,-5] = -1.0 + + # Initialize vector q + q = np.zeros(N) + + # Populate q with boundary conditions + q[-1] = 2*H_n*h**3 # Shear at pile tip + q[-2] = M_n*h**2 # Moment at pile tip + q[-3] = 2*H*h**3 # Shear at pile head + q[-4] = M*h**2 # Moment at pile head + + y = linalg.solve(EI*X,q) + + return y + +############################### +#### P-Y Curve Definitions #### +############################### + +def py_Reese(z, D, UCS, Em, t, z_0=0.0, RQD=69, print_curves='Yes'): + ''' + Returns an interp1d interpolation function which represents the Reese (1997) p-y curve at the depth of interest. + + Important: Make sure to import the interp1d function by running 'from scipy.interpolate import interp1d' in the main program. + + Input: + ----- + z - Depth relative to pile head (m) + D - Pile diameter (m) + t - Pile wall thickness (m) + UCS - Unconfined compressive strength of the rock (Pa) + Em - Rock mass modulus (Pa) + z_0 - Load eccentricity above the mudline or depth to mudline relative to the pile head (m) + RQD - Rock Quality Designation (%), used in the strength reduction factor alpha. + + Output: + ------ + Returns an interp1d interpolation function which represents the p-y curve at the depth of interest. + 'p' (N/m) and 'y' (m). 
+ ''' + + from scipy.interpolate import interp1d + global var_Reese + + Dref = 0.305; nhu = 0.3; E = 200e9 + I = np.pi*(D**4 - (D - 2*t)**4)/64.0 + EI = E*I + alpha = -0.00667*RQD + 1 + krm = 0.0005 + + if z < 3*D: + p_ur = alpha*UCS*D*(1 + 1.4*z/D) + #kir = (100 +400*z/(3*D)) + else: + p_ur = 5.2*alpha*UCS*D + #kir = 500 + + kir = (D/Dref)*2**(-2*nhu)*(EI/(Em*D**4))**0.284 + Kir = kir*Em + y_rm = krm*D + y_a = (p_ur/(2*y_rm**0.25*Kir))**1.333 + + N = 20 + y = np.concatenate((-np.logspace(1,-3,N),[0],np.logspace(-3,1,N))) + + p=[]; P=[]; + for i in range (len(y)): + if abs(y[i]) < y_a: + P = np.sign(y[i])*Kir*y[i] + elif abs(y[i]) > y_a: + P = min((p_ur/2)*(abs(y[i])/y_rm)**0.25,p_ur) + p.append(P) + + p = np.array(p).squeeze() + for j in range(len(y)): + if y[j] < 0: + p[j] = -1*p[j] + elif y[j] > 0: + p[j] = p[j] + + var_Reese = inspect.currentframe().f_locals + + f = interp1d(y,p) # Interpolation function for p-y curve + + if print_curves == 'Yes': + plt.plot(y,p), + plt.xlabel('y (m)'), + plt.ylabel('p (kN/m)'), + plt.title('PY Curves - Reese (1997)') + plt.grid(True) + plt.xlim([-0.03*D,0.03*D]) + plt.ylim([min(p),max(p)]) + + return f # This is f (linear interpolation of y-p) + +####################### +#### Rock Profile ##### +####################### + +def rock_profile(profile,plot_profile='No'): + ''' + Define the (weak) rock profile used by the p-y analyzer. Outputs 'interp1d' functions containing + UCS and Em profiles to be used by the p-y curve functions. + + Input: + ----- + profile - A 2D tuple in the following format: ([depth (m), UCS (MPa), Em (MPa), py-model]) + The soil profile should be defined relative to the pile/tower head (i.e. point of lateral load application) + so that any load eccentricities can be taken into account. An example soil profile is shown below. + Eg: array([[z0,UCS0,Em0, 'Matlock', 0.02], + [z1,UCS1,Em1, 'Matlock', 0.01], + [z2,UCS2,Em2, 'Jeanjean', 550], + ...]) + *The current program cannot define layers with different p-y models. But it will added in the future. + + plot_profile - Plot Su vs depth profile. Choose 'Yes' to plot. 
+ + Output: + ------ + f_UCS - 'interp1d' function containing unconfined compressive strength profile (Pa) + f_Em - 'interp1d' function containing rock mass modulus profile (Pa) + ''' + + from scipy.interpolate import interp1d + global var_rock_profile + + # Depth of mudline relative to pile head + z0 = profile[0,0].astype(float) + + # Extract data from soil_profile array and zero strength virtual soil layer + # from the pile head down to the mudline + depth = np.concatenate([np.array([z0]),profile[:,0].astype(float)]) # m + UCS = np.concatenate([np.array([0]),profile[:,1].astype(float)]) # MPa + Em = np.concatenate([np.array([0]),profile[:,2].astype(float)]) # MPa + + if plot_profile == 'Yes': + # Plot UCS vs z profile for confirmation + #fig2, ax2 = plt.subplots(1,1) + plt.plot(UCS,depth,'-',label='UCS',color='blue') + plt.legend(loc='lower left') + plt.xlabel('Unconfined compressive strength (MPa)'), + plt.ylabel('Depth below the pile head (m)'), plt.grid(True) + # Plot mudline/ground surface + plt.plot([-0.5*max(UCS),max(UCS)],[z0,z0],'--',color='red') + plt.text(-0.5*max(UCS),0.95*z0,'Mudline',color='red') + ax = plt.gca(); ax.invert_yaxis(), + ax.xaxis.tick_top(), ax.xaxis.set_label_position('top') + + # Define interpolation functions + f_UCS = interp1d(depth, UCS*1e6, kind='linear') # Pa + f_Em = interp1d(depth, Em*1e6, kind='linear') # Pa + + var_rock_profile = inspect.currentframe().f_locals + + return f_UCS, f_Em + +if __name__ == '__main__': + + # depth UCS Em p-y model + profile = np.array([[0.0, 3.5, 190., 'Name of p-y model'], + [3.0, 3.5, 280., 'Name of p-y model'], + [8.0, 3.5, 400., 'Name of p-y model'], + [15.0, 3.5, 450., 'Name of p-y model']]) + + f_UCS, f_Em = rock_profile(profile,plot_profile='No') + + #Pile dimensions + L = 15.0 # Pile length (m) + D = 2.8 # Pile diameter (m) + t = 0.075 # Pile wall thickness (m) + E = 200e9 # Elastic modulus of steel (Pa) + + #Pile head loads + H = 54e6 # Horizontal load on pile head (N) + V = 10e6 # Vertical load on pile head (N) + M = 0 # Moment on pile head (N*m) + H_n = 0 # Horizontal load on pile tip (N) + M_n = 0 # Moment on pile tip (N*m) + + y,z = py_analysis(profile, L=L, D=D, t=t, E=E, + V=V, H=H, H_n=H_n, M=M, M_n=M_n) + + #Plot deflection profile of pile + fig, ax = plt.subplots(figsize=(3,5)) + ax.plot(y,z,'r') + ax.set_xlabel('Displacement [m]') + ax.set_ylabel('Depth below pile head [m]') + ax.set_ylim([L + 2,-2]) + ax.set_xlim([-0.03*D,0.03*D]) + ax.grid(ls='--') \ No newline at end of file diff --git a/famodel/cables/cable.py b/famodel/cables/cable.py index 1c53af07..8c7e6b14 100644 --- a/famodel/cables/cable.py +++ b/famodel/cables/cable.py @@ -94,6 +94,9 @@ def __init__(self, id, d=None): for i in self.dd['cables']: # self.subcomponents: self.L += i.L + # failure probability + self.failure_probability = {} + def reposition(self): # reposition cable and set end points for the first and last cable sections (or the dynamic cable for a suspended cable) headingA = self.subcomponents[0].headingA + self.attached_to[0].phi diff --git a/famodel/cables/components.py b/famodel/cables/components.py index af6a7254..9f108339 100644 --- a/famodel/cables/components.py +++ b/famodel/cables/components.py @@ -32,6 +32,9 @@ def __init__(self,id, r=None,**kwargs): # MoorPy Point Object for Joint self.mpConn = None + # Dictionary for failure probability + self.failure_probability = {} + #this might be useful for the ends of dynamic cables def makeMoorPyConnector(self, ms): 
'''Create a MoorPy connector object in a MoorPy system diff --git a/famodel/cables/dynamic_cable.py b/famodel/cables/dynamic_cable.py index 73b4ebae..745670ec 100644 --- a/famodel/cables/dynamic_cable.py +++ b/famodel/cables/dynamic_cable.py @@ -97,6 +97,7 @@ def __init__(self, id, dd=None, subsystem=None, anchor=None, rA=[0,0,0], rB=[0,0 self.loads = {} self.reliability = {} self.cost = {} + self.failure_probability = {} def makeCableType(self,cabDict): ''' @@ -318,7 +319,7 @@ def createSubsystem(self, case=0): if self.ss: print('A subsystem for this Dynamic cable class instance already exists, this will be overwritten.') - self.ss=Subsystem(depth=-self.dd['zAnchor'], rho=self.rho, g=self.g, span=self.dd['span'], rBfair=self.rB) + self.ss=Subsystem(depth=-self.dd['zAnchor'], rho=self.rho, g=self.g, span=self.dd['span']) lengths = [] types = [] nsegs = [] @@ -333,7 +334,7 @@ def createSubsystem(self, case=0): elif nsegs[-1] < 3: nsegs[-1] = 3 # make the lines and set the points - self.ss.makeGeneric(lengths,types,suspended=case,nsegs=nsegs) + self.ss.makeGeneric(lengths,types,suspended=case) self.ss.setEndPosition(self.rA,endB=0) self.ss.setEndPosition(self.rB,endB=1) diff --git a/famodel/cables/static_cable.py b/famodel/cables/static_cable.py index a3dbdfd1..b04ba8bf 100644 --- a/famodel/cables/static_cable.py +++ b/famodel/cables/static_cable.py @@ -59,6 +59,7 @@ def __init__(self, id, dd=None, subsystem=None, anchor=None, rA=[0,0,0], rB=[0,0 self.loads = {} self.reliability = {} self.cost = {} + self.failure_probability = {} def getLength(self): diff --git a/famodel/famodel_base.py b/famodel/famodel_base.py index caddc939..7d73a7d3 100644 --- a/famodel/famodel_base.py +++ b/famodel/famodel_base.py @@ -568,6 +568,15 @@ def setEndPosition(self, r, end): raise Exception('End A or B must be specified with either the letter, 0/1, or False/True.') + def delete(self): + '''Detach the point from anything it's attached to, then delete the + object (if such a thing is possible?).''' + + self.detachFrom(0) # detach end A + self.detachFrom(1) # detach end B + + # next step would just be to separately remove any other references + # to this object... # general functions diff --git a/famodel/mooring/anchor.py b/famodel/mooring/anchor.py index 29732090..8de6b873 100644 --- a/famodel/mooring/anchor.py +++ b/famodel/mooring/anchor.py @@ -71,6 +71,7 @@ def __init__(self, dd=None, ms=None, r=[0,0,0], aNum=None,id=None): } ''' self.soilProps = {} + self.failure_probability = {} # self.cost = {} def makeMoorPyAnchor(self, ms): diff --git a/famodel/mooring/connector.py b/famodel/mooring/connector.py index 510363aa..9c6192eb 100644 --- a/famodel/mooring/connector.py +++ b/famodel/mooring/connector.py @@ -38,6 +38,9 @@ def __init__(self,id, r=[0,0,0], **kwargs): # MoorPy Point Object for Connector self.mpConn = None + # dictionary of failure probabilities + self.failure_probability = {} + def makeMoorPyConnector(self, ms): '''Create a MoorPy connector object in a MoorPy system Parameters @@ -59,6 +62,7 @@ def makeMoorPyConnector(self, ms): self.mpConn.m = self['m'] self.mpConn.v = self['v'] self.mpConn.CdA = self['CdA'] + return(ms) @@ -72,6 +76,9 @@ def __init__(self,id, **kwargs): dict.__init__(self, **kwargs) # initialize dict base class Edge.__init__(self, id) # initialize Edge base class + + # Dictionary of failure probabilities + self.failure_probability = {} # <<< have a ['type'] entry that stores a type, which could be used for sizing... 
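These per-component failure_probability dictionaries are what create_failureGraph in failureGraphs.py consults before falling back to the spreadsheet failure rates. A minimal sketch of that lookup pattern (variable names illustrative, not an API defined by this PR):

# Sketch of the override pattern used when building failure nodes;
# component and init_prob_dict stand in for the real objects.
fail_prob = component.failure_probability.get(failure_name, init_prob_dict[failure_name])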
diff --git a/famodel/mooring/mooring.py b/famodel/mooring/mooring.py index f8eb25be..bb04807a 100644 --- a/famodel/mooring/mooring.py +++ b/famodel/mooring/mooring.py @@ -15,8 +15,7 @@ class Mooring(Edge): Work in progress. Eventually will inherit from Edge. ''' - def __init__(self, dd=None, subsystem=None, anchor=None, rA=[0,0,0], rB=[0,0,0], - rad_anch=500, rad_fair=58, z_anch=-100, z_fair=-14, + def __init__(self, dd=None, subsystem=None, anchor=None, rho=1025, g=9.81,id=None): ''' Parameters @@ -41,6 +40,8 @@ def __init__(self, dd=None, subsystem=None, anchor=None, rA=[0,0,0], rB=[0,0,0], ] span zAnchor + z_fair + rad_fair EndPositions: { endA, endB @@ -53,10 +54,11 @@ def __init__(self, dd=None, subsystem=None, anchor=None, rA=[0,0,0], rB=[0,0,0], some additional manual setup of the mooring object after it is called. <<< - ''' + ''' Edge.__init__(self, id) # initialize Edge base class # Design description dictionary for this Mooring self.dd = dd + # let's turn the dd into something that holds subdict objects of connectors and sections if self.dd: # >>> list of sections ? And optional of section types (e,g, chian, poly) @@ -65,7 +67,7 @@ def __init__(self, dd=None, subsystem=None, anchor=None, rA=[0,0,0], rB=[0,0,0], # Turn what's in dd and turn it into Sections and Connectors for i, con in enumerate(self.dd['connectors']): - if con: + if con and 'type' in con: Cid = con['type']+str(i) else: Cid = 'Conn'+str(i) @@ -76,8 +78,10 @@ def __init__(self, dd=None, subsystem=None, anchor=None, rA=[0,0,0], rB=[0,0,0], #self.dd['connectors'][i ].attach(self.dd['sections'][i], end=0) #self.dd['connectors'][i+1].attach(self.dd['sections'][i], end=1) + # >>> add some error checks for the correct lengths <<< + # Connect them and store them in self(Edge).subcomponents! - subcons = [] # list of node-edge-node... to pass to the function + subcons = [] # temporary list of node-edge-node... 
to pass to the function + for i in range(self.n_sec): + subcons.append(self.dd['connectors'][i]) + subcons.append(self.dd['sections'][i]) @@ -90,17 +94,29 @@ def __init__(self, dd=None, subsystem=None, anchor=None, rA=[0,0,0], rB=[0,0,0], # MoorPy subsystem that corresponds to the mooring line self.ss = subsystem + # workaround for users who are making mooring objects based on pre-existing subsystems + if self.ss: + if not self.dd: + self.dd = {} + if not 'zAnchor' in self.dd: + self.dd['zAnchor'] = -self.ss.depth + if not 'span' in self.dd: + self.dd['span'] = self.ss.span + if not 'rad_fair' in self.dd: + self.dd['rad_fair'] = self.ss.rad_fair + if not 'z_fair' in self.dd: + self.dd['z_fair'] = self.ss.z_fair - # end point absolute coordinates, to be set later - self.rA = rA - self.rB = rB - self.heading = 0 # relative positions (variables could be renamed) - self.rad_anch = rad_anch - self.rad_fair = rad_fair - self.z_anch = z_anch - self.z_fair = z_fair + self.rad_anch = self.dd['span'] + self.dd['rad_fair'] + self.rad_fair = self.dd['rad_fair'] + self.z_anch = self.dd['zAnchor'] + self.z_fair = self.dd['z_fair'] + + # end point absolute coordinates, to be set later + self.rA = np.array([-self.rad_anch, 0, self.z_anch]) + self.rB = np.array([-self.rad_fair, 0, self.z_fair]) + self.heading = 270 # compass heading from B to A [deg] self.adjuster = None # custom function that can adjust the mooring @@ -115,6 +131,7 @@ def __init__(self, dd=None, subsystem=None, anchor=None, rA=[0,0,0], rB=[0,0,0], self.loads = {} self.reliability = {} self.cost = {} + self.failure_probability = {} def setSectionLength(self, L, i): @@ -135,6 +152,17 @@ def setSectionDiameter(self, d, i): self.sectionType[i].update( setLineType( ... d)) """ + def setSectionType(self, lineType, i): + '''Sets lineType of section, including in the subsystem + if there is one.''' + + # set type dict in dd (which is also Section/subcomponent) + self.dd['sections'][i]['type'] = lineType + + if self.ss: # if a Subsystem exists, adjust the type there too + self.ss.lineTypes[i] = lineType + + def reposition(self, r_center=None, heading=None, project=None, degrees=False, **kwargs): '''Adjusts mooring position based on changed platform location or heading. It can call a custom "adjuster" function if one is @@ -145,8 +173,8 @@ def reposition(self, r_center=None, heading=None, project=None, degrees=False, * r_center The x, y coordinates of the platform (undisplaced) [m]. heading : float - The absolute heading of the mooring line [deg or rad] depending on - degrees parameter (True or False). + The absolute compass heading direction of the mooring line + [deg or rad] depending on degrees parameter (True or False). project : FAModel Project, optional A Project-type object for site-specific information used in custom mooring line adjustment functions (mooring.adjuster). 
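The heading stored here is a compass direction (measured clockwise from north), so conversion to the x-y math convention subtracts it from 90 degrees, exactly as the reposition hunk below does. A small standalone sketch:

import numpy as np

heading = 270                              # compass heading from B to A [deg]
phi = np.radians(90 - heading)             # same angle in x-y (math) convention [rad]
u = np.array([np.cos(phi), np.sin(phi)])   # 2D unit vector along the line heading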
@@ -157,12 +185,14 @@ def reposition(self, r_center=None, heading=None, project=None, degrees=False, * # Adjust heading if provided if not heading == None: if degrees: - self.heading = np.radians(heading) - else: self.heading = heading - + else: + self.heading = np.degrees(heading) + + phi = np.radians(90-self.heading) # heading in x-y radian convention [rad] + # heading 2D unit vector - u = np.array([np.cos(self.heading), np.sin(self.heading)]) + u = np.array([np.cos(phi), np.sin(phi)]) #print(u) r_center = np.array(r_center)[:2] # Set the updated fairlead location @@ -239,39 +269,44 @@ def createSubsystem(self, case=0): # check if a subsystem already exists if self.ss: print('A subsystem for this Mooring class instance already exists, this will be overwritten.') - self.ss=Subsystem(depth=-self.dd['zAnchor'], rho=self.rho, g=self.g, span=self.dd['span'],rAFair=self.rA, rBFair=self.rB) + self.ss=Subsystem(depth=-self.dd['zAnchor'], rho=self.rho, g=self.g, + span=self.dd['span'], rad_fair=self.rad_fair, + z_fair=self.z_fair) lengths = [] types = [] # run through each line section and collect the length and type - for sec in self.dd['sections']: - lengths.append(sec['length']) - types.append(sec['type']['name']) - self.ss.lineTypes[types[-1]] = sec['type'] # points to existing type dict in self.dd for now + for i, sec in enumerate(self.dd['sections']): + lengths.append(deepcopy(sec['length'])) + # points to existing type dict in self.dd for now + types.append(deepcopy(sec['type'])) # list of type names + #types.append(sec['type']['name']) # list of type names + #self.ss.lineTypes[i] = sec['type'] + # make the lines and set the points - self.ss.makeGeneric(lengths,types,suspended=case) + self.ss.makeGeneric(lengths, types, + connectors=[self.dd['connectors'][ic+1] for ic in range(len(self.dd['connectors'])-2)], + suspended=case) + self.ss.setEndPosition(self.rA,endB=0) + self.ss.setEndPosition(self.rB,endB=1) - # note: next bit has similar code/function as Connector.makeMoorPyConnector <<< # add in connector info to subsystem points - if case == 0: # has an anchor - need to ignore connection for first point + if case == 0: # has an anchor - need to ignore connection for first point because anchor is a point itself so can't have a point attached to a point startNum = 1 else: # no anchor - need to include all connections startNum = 0 for i in range(startNum,len(self.ss.pointList)): point = self.ss.pointList[i] - point.m = self.dd['connectors'][i]['m'] - point.v = self.dd['connectors'][i]['v'] + # point.m = self.dd['connectors'][i]['m'] # now done in ss.makeGeneric + # point.v = self.dd['connectors'][i]['v'] # now done in ss.makeGeneric + point.CdA = self.dd['connectors'][i]['CdA'] + # solve the system self.ss.staticSolve() - return(self.ss) + return(self.ss) """ # rough method ideas...maybe not necessary or can use existing dict methods @@ -584,3 +619,22 @@ def addMarineGrowth(self, mgDict, project=None, idx=None): return(changeDepths,changePoints) + def addCorrosion(self,corrosion_mm=10): + ''' + Calculates MBL of chain line with corrosion included + + Parameters + ---------- + corrosion_mm : float, optional + Corrosion allowance to subtract from the nominal chain diameter [mm]. The default is 10. + + Returns + ------- + None. 
+ + ''' + for i,sec in enumerate(self.dd['sections']): + if sec['material']=='chain': + MBL_cor = sec['MBL']*( (sec['d_nom']-(corrosion_mm/1000))/sec['d_nom'] )**2 # corroded MBL + else: + MBL_cor = sec['MBL'] + sec['MBL_cor'] = MBL_cor # store the corroded MBL on the section so it is not discarded
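A quick numerical check of the corroded-MBL formula above (numbers illustrative):

d_nom, corrosion_mm = 0.120, 10                 # 120 mm nominal chain, default allowance
print(((d_nom - corrosion_mm/1000)/d_nom)**2)   # ~0.84, i.e. the chain retains ~84% of its MBL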
-This method does not consider routing, and would assume the static cable takes
-a straight path from the ends of the dynamic cables.
-For additional detail related to cable routing, the alternative [cable](#cables-with-routing)
-section should be used.
+turbine (matching an ID in the array table) or substation (matching an ID in the substation section)
+that it attaches to, the [top level cable](#top-level-cables)
+used, the heading of the cable at each end's attachment, and a length adjustment.
+Additional detail related to subsections of the top level cable, joints, and any cable
+routing is in the [Cables](#cables) section.
+The CableID refers to an entry in the [Top Level Cables](#top-level-cables) section.
 
 ```yaml
-array_cables:
-    keys: [ AttachA, AttachB, DynCableA, DynCableB, headingA, headingB, cableType]
+array_cables:
+    keys: [ CableID, AttachA, AttachB, headingA, headingB, lengthAdjust]
     data:
-      - [ turbine1, turbine2, lazy_wave1, lazy_wave1, 180, 30, static_36]
-      - [ turbine2, turbine3, lazy_wave1, lazy_wave1, 150, 30, static_36]
+      - [ array_cable1, fowt1, f2, 180, 180, 0]
+      - [ suspended_cable1, f2, substation1, 0, 180, 0]
 ```
 
+## Substation(s)
+
+The substation section defines the substations used in the array. Each substation key (name) must be
+distinct from the ID keys in the array layout table.
+
 ## Turbine(s)
 
 The turbine section can contain either a single turbine or a list of turbines,
@@ -346,7 +369,8 @@ mooring_systems:
 ### Mooring Line Configurations
 
 The mooring line configurations section lists the segment lengths and line types that
 make up each mooring line. Each line has a name that can then be specified
-as the MooringConfigID in the mooring systems section. The anchoring radius (also known as the span), fairlead radius, and fairlead depth are also specified for each line configuration.
+as the MooringConfigID in the mooring systems section. A span is specified for each configuration, representing the distance in the x-y plane between
+the two connection points of the line - i.e. between fairlead and anchor or, for shared lines, between the two fairleads. Fairlead radius and fairlead depth are specified in the [Platform](#platforms) section.
 Each line contains a list of sections that details the line section type and length. The line type name connects to information in the
 mooring [line section properties](#mooring-line-section-properties).
 Additionally, before and after each line section has an optional input which can list the
@@ -358,7 +382,7 @@ There is also a True/False option for whether the section length is adjustable.
 Shared or suspended lines may also have an optional 'symmetric' input which, if set to true, signifies that the line is symmetric and
 only the first half of the line is provided in the 'sections' list. When loaded into the project class, the mooring object will automatically
 be fully filled out by mirroring the first half of the line. If a connector is provided as the last item in the sections list for a symmetric
 line, it is assumed that the middle line is two identical lines with the given connector between them; otherwise
-the middle line (last line given in the list) is doubled in length in the mirroring process. For example, the 'shared_2_clump' config in the yaml below would produce a symmetric shared line with sections in the following order:
+the middle line (last line given in the list) is doubled in length in the mirroring process.
+For example, the 'shared_2_clump' config in the yaml below would produce a symmetric shared line with sections in the following order:
+an 80 m section of poly_180, a clump weight, a 762 m section of poly_180 (note the doubled length), a clump weight, and finally an 80 m
+section of poly_180.
 
@@ -368,11 +392,9 @@ an 80m section of poly_180, a clump weight, a 762 m section of poly_180 (note th
 
     taut-poly_1:  # mooring line configuration identifier
 
-        name: Taut polyester configuration 1  # descriptive name
+        name: Taut polyester configuration 1   # descriptive name
 
-        anchoring_radius: 1131.37
-        fairlead_radius: 40.5
-        fairlead_depth: -20
+        span: 800   # x-y distance between fairlead and anchor, or for shared lines, fairlead and fairlead
 
         sections:
           - connectorType: shackle   # ID of a connector type (optional)
@@ -392,9 +414,7 @@ an 80m section of poly_180, a clump weight, a 762 m section of poly_180 (note th
             name: Shared line with two clump weights
             symmetric: True
 
-            anchoring_radius: 1142
-            fairlead_radius: 58
-            fairlead_depth: -14
+            span: 1142
 
             sections:
               - type: poly_180
@@ -491,100 +511,116 @@ anchor_types:
 
 This section describes the cables through the array including both static
 and dynamic portions of cables. At the top level, each array cable going
-between a pair of turbines (or a turbine and a substation) is defined.
-This definition can either occur in the [array_cables](#array-cables)
-section or the [cables](#cables-with-routing) section.
-The latter provides additional options for defining
-the cable routing and burial depth.
-
-### Cables with Routing
-
-The cables section contains a list of every cable in the array. Here, a cable
-is defined as the full assembly of electrical connection equipment between
-two turbines or a turbine and a substation. Similar to the [array_cables](#array-cables)
-section, 'type' links to the cross-section property description of the static
-portion of the cable. endA and endB sections define what each end of the cable
-is attached to, at what heading it comes off at, and what dynamic cable
-profile it uses. Additional fields specify the routing of the static portion
-of the cable and the burial depth as as function of cable length.
+between a pair of turbines (or a turbine and a substation) is defined. Each
+subsection of cable is then defined in the [Cable Configurations](#cable-configurations)
+section, including buoyancy module layout.
+
+
+### Top Level Cables
+
+A top-level cable is defined as the full assembly of electrical connection equipment between
+two turbines or a turbine and a substation. 'type' links to the cable configuration description
+of the subsections of the cable, which can be dynamic or static. The 'connectorType' refers to
+an entry in the [Cable Joints](#cable-joints) section. The first entry in the sections list is connected
+to end A, while the last entry is connected to end B.
 
 ```yaml
 cables:
-  - name : array_cable1      # descriptive cable name
-    type : static_cable_80   # cable section type ID
-
-    endA:
-        attachID: turbine_1            # FOWT/substation/junction ID
-        heading: 180                   # [deg] heading of attachment at end A
-        dynamicID: dynamic_lazy_wave1  # ID of dynamic cable configuration at this end
-
-    endB:
-        attachID: turbine_2            # FOWT/substation/junction ID
-        heading: 30                    # [deg] heading of attachment at end B
-        dynamicID: dynamic_lazy_wave1  # ID of dynamic cable configuration at this end
-
-    routing_x_y_r:  # optional vertex points along the cable route. Nonzero radius wraps around a point at that radius.
-      - [1000, 1200, 20]
-      - [2000, 1500, 20]
-
-    burial:  # optional definition of cable burial depth over its length
-        station: [0, 1]    # length along cable, normalized by first and last value
-        depth : [0.1, 0.2] # [m] burial depth
+    array_cable1:
+        name: array cable with lazy wave - static - lazy wave sections  # descriptive cable name
+        # type : static_cable_80   # cable section type ID
+
+        sections:   # refers to a configuration in cable_configs
+        # first entry is at end A, last entry is at end B
+          - type: dynamic_lazy_wave1
+
+          - connectorType: joint_1
+
+          - type: static_1
+
+          - connectorType: joint_1
+
+          - type: dynamic_lazy_wave1
 
-  - name : array_cable_2     # descriptive cable name
-    type : static_cable_80   # cable section type ID
-    ...
+
+    suspended_cable1:
+        sections:
+          - type: dynamic_suspended_1
 ```
 
-## Dynamic Cable Configurations
+### Cable Configurations
 
-This section lists the dynamic cable configurations used in the array design.
-Similar to the mooring_line_configs section, it details the assembly of
-cable section that make up a dynamic cable profile, with links to the cross
-sectional cable properties. Dynamic cable configurations have some special
-properties including specification of the voltage, and the option of
-specifying 'appendages' along the cable length, which can represent [discrete
-objects](#cable-appendages) like buoyancy modules.
+This section lists the cable configurations used in the array design.
+The 'type' listed in the entry is either 'static' or 'dynamic'.
+The 'cableFamily' key is used when the cable cross-sectional information
+will be imported from the cableProps_default yaml (in which case, an area A
+must be provided in mm^2), and the value for cableFamily must match an entry in the cableProps_default yaml.
+Alternatively, if the cable cross-sectional properties will be provided in the [Cable Cross Sectional Properties](#cable-cross-sectional-properties)
+section, the key 'typeID' is used in place of 'cableFamily' and
+refers to an entry in the Cable Cross Sectional Properties list.
 
-```yaml
-  dynamic_cable_configs:
+The sections list provides details on the layout of buoyancy modules and clump weights, including the
+distance of the buoyancy section midpoint from end A, the number of modules, the spacing between modules,
+and the volume. The volume is only needed if the buoyancy module properties will be imported
+from the cableProps_default yaml. As with the cable properties, the 'type' in the sections list must refer to
+an entry in either the [Cable Appendages](#cable-appendages) section or in the cableProps_default.yaml.
 
-    lazy_wave1
+Static cables can have routing information listed as vertex points along the cable route, plus the radius of curvature at each vertex.
+Static cable burial information can also be provided.
+
+Similar to mooring lines, the span refers to the end-to-end distance of the line in the x-y plane.
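+
+As a minimal sketch, the two mutually exclusive ways of pointing a configuration at its
+cross-sectional properties look like this (type names reused from the full examples below):
+
+```yaml
+    # Option 1: derive properties from the cableProps_default yaml (conductor area required)
+    cableFamily: dynamic_cable_33
+    A: 300                   # [mm^2] conductor cross-sectional area
+
+    # Option 2: reference an explicit entry in the Cable Cross Sectional Properties section
+    typeID: static_cable_36
+```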
+
+```yaml
+    basic1:
+        name: basic cable configuration, essentially a straight line between platforms
+        span: 1600   # [m]
+        type: dynamic
+        zJTube: -30  # [m] depth at which the cable exits the J-tube
+
+        cableFamily: dynamic_cable_33
+        length: 1700 # [m]
+        A: 100       # cable conductor cross-sectional area [mm^2] (required for types listed in cable props yaml)
+
+
+
+
+    dynamic_lazy_wave1:
         name: Lazy wave configuration 1 (simpler approach)
         voltage: 66   # [kV]
-        span :        # [m] horizontal distance to end of dynamic cable
+        span : 600    # [m] horizontal distance to end of dynamic cable
+        zJTube: -20   # [m] depth at which the cable exits the J-tube
+        type: dynamic # dynamic or static
+        A: 300
 
-    sections:
-      - type: dynamic_cable_27        # ID of a cable section type1
-        length: 200                   # [m] length (unstretched)
-
-      - type: dynamic_cable_27_w_buoy # (section properties including averaged effect of buoyancy modules)
-        length: 300
-
-      - type: dynamic_cable_27
-        length: 200
-
-    attachment:
-        type: j-tube
-        coordinate:   # relative location
-
-    lazy_wave2
-        name: Lazy wave configuration 1 (more detailed approach)
-        voltage:  # [kV]
-        span :    # [m] horizontal distance to end of dynamic cable
+        cableFamily: dynamic_cable_33  # ID of a cable section type
+        length: 900   # [m] length (unstretched)
+
+        sections:
+          - type: Buoyancy_750m #_w_buoy # (section properties including averaged effect of buoyancy modules)
+            L_mid: 450     # [m] from end A
+            N_modules: 5   # number of modules in this buoyancy section
+            spacing: 21    # [m]
+            V: 2           # [m^3]
+
+
+    static_1:
+        name: Static cable configuration 1
+        voltage: 66  # [kV]
+        span: 350
+        type: static
 
-    sections:
-      - type: dynamic_cable_27    # ID of a cable section type1
-        length: 200               # [m] length (unstretched)
-        appendages:
-            type: buoyancy_module_1
-            locations: [10,12,13.5,15,18]
-
-    attachment:
-        type: j-tube
-        coordinate:   # relative location
+        typeID: static_cable_36
+        length: 2200
+
+        routing_x_y_r:  # optional vertex points along the cable route. Nonzero radius wraps around a point at that radius.
+          - [1000, 1200, 20]
+          - [2000, 1500, 20]
+
+        burial:  # optional definition of cable burial depth over its length
+            station: [0, 1]    # length along cable, normalized by first and last value
+            depth : [0.1, 0.2] # [m] burial depth
 ```
 
 ### Cable Cross Sectional Properties
@@ -626,7 +662,7 @@ such as buoyancy modules or cable protection system components. Each entry is
 given an identifier and can have a variety of parameters that describe its
 lumped properties, such as mass, volume, and drag coefficient-area
 product. These appendages are used in the
-[dynamic_cable_configs](#dynamic-cable-configurations) section.
+[cable_configs](#cable-configurations) section.
 
 ```yaml
 cable_appendages:
 
     buoy_module_1:
         mass: 2700     # [kg]  mass
         volume: 8.615  # [m^3] volume
@@ -637,3 +673,14 @@ product. These appendages are used in the
         CdA: 3.8       # [m^2] product of cross-sectional area and drag coefficient
         length: 2.2    # [m] length taken up along cable
 ```
+
+### Cable Joints
+
+This section lists any cable joints that might connect cable subsections. Each entry is given
+an identifier and parameters to describe the joint. These joints are used in the
+[Top Level Cables](#top-level-cables) section.
+```yaml
+cable_joints:
+    joint_1:
+        mass: 100000   # TBD
+```
diff --git a/famodel/platform/platform.py b/famodel/platform/platform.py
index d91c14a8..bca5683e 100644
--- a/famodel/platform/platform.py
+++ b/famodel/platform/platform.py
@@ -39,7 +39,7 @@ def __init__(self, id, r=[0,0], heading=0, mooring_headings=[60,180,300],rFair=N
 
         self.body = None  # body object in MoorPy associated with the platform
 
-        self.mooring_headings = [np.radians(mooring_headings)] # headings of mooring lines [rad]
+        self.mooring_headings = np.radians(mooring_headings) # headings of mooring lines [rad]
 
         self.n_mooring = len(mooring_headings) # number of mooring lines
 
@@ -55,9 +55,10 @@ def __init__(self, id, r=[0,0], heading=0, mooring_headings=[60,180,300],rFair=N
         self.loads = {}
         self.reliability = {}
         self.cost = {}
+        self.failure_probability = {}
 
-    def setPosition(self, r, heading=None, degrees=False):
+    def setPosition(self, r, heading=None, degrees=False, project=None):
         '''
         Set the position/orientation of the platform as well as the
         associated anchor points.
@@ -98,7 +99,7 @@ def setPosition(self, r, heading=None, degrees=False):
             heading_i = self.mooring_headings[i] + self.phi
             
             # Reposition the whole Mooring
-            self.attachments[mooring]['obj'].reposition(self.r, heading=heading_i)
+            self.attachments[mooring]['obj'].reposition(self.r, heading=heading_i, project=project)
     
     def mooringSystem(self,rotateBool=0,mList=None):
         '''
diff --git a/famodel/project.py b/famodel/project.py
index a2796b3f..3216953d 100644
--- a/famodel/project.py
+++ b/famodel/project.py
@@ -10,7 +10,10 @@
 from moorpy import helpers
 import yaml
 from copy import deepcopy
-import raft
+try:
+    import raft
+except ImportError:  # RAFT is optional; features that require it will be unavailable
+    pass
 
 #from shapely.geometry import Point, Polygon, LineString
 
@@ -306,13 +309,15 @@ def loadDesign(self, d):
         # I think we want to just store it as a dictionary
         # validate it,
         # then have it available for use when making Mooring objects and subsystems
-        def getMoorings(lineconfig):
+        def getMoorings(lineconfig, i):
             '''
             Parameters
             ----------
             lineconfig : string
                 Line configuration type
+            i : int
+                Index in array table (essentially which platform)
 
             Returns
             -------
@@ -403,8 +408,9 @@ def getMoorings(lineconfig):
             m_config['zAnchor'] = -self.depth
             m_config['span'] = lineConfigs[lineconfig]['span']
             m_config['name'] = lineconfig
-            # m_config['zFair'] = lineConfigs[lineconfig]['fairlead_depth']
-            # m_config['rFair'] = lineConfigs[lineconfig]['fairlead_radius']
+            # add fairlead radius and depth to dictionary
+            m_config['rad_fair'] = self.platformList[arrayInfo[i]['ID']].rFair
+            m_config['z_fair'] = self.platformList[arrayInfo[i]['ID']].zFair
 
             m_config['connectors'] = c_config # add connectors section to the mooring dict
 
@@ -510,11 +516,13 @@ def getAnchors(lineAnch, mc=None,aNum=0):
                     # lineconfig = mSystems[m_s]['data'][j][0]
 
                     # create mooring and connector dictionary
-                    m_config = getMoorings(lineconfig)
+                    m_config = getMoorings(lineconfig, i)
+
                     # create mooring class instance as part of mooring list in the project class instance
-                    mc = (Mooring(dd=m_config, rA=[m_config['span'],0,m_config['zAnchor']], rB=[self.platformList[arrayInfo[i]['ID']].rFair,0,self.platformList[arrayInfo[i]['ID']].zFair],
-                                  rad_anch=m_config['span'], z_anch=m_config['zAnchor'],rad_fair=self.platformList[arrayInfo[i]['ID']].rFair,z_fair=self.platformList[arrayInfo[i]['ID']].zFair,id=(arrayInfo[i]['ID'],mct)))
+                    mc = (Mooring(dd=m_config, id=(arrayInfo[i]['ID'],mct)))
+                    mc.rA = [m_config['span']+m_config['rad_fair'],0,m_config['zAnchor']]
+                    mc.rB = [m_config['rad_fair'],0,m_config['z_fair']]
                     # adjust end positions based on platform location and mooring and platform headings
                     mc.reposition(r_center=self.platformList[arrayInfo[i]['ID']].r, heading=headings[j]+self.platformList[arrayInfo[i]['ID']].phi, project=self)
                     # adjust anchor z location and rA based on location of anchor
@@ -560,10 +568,6 @@ def getAnchors(lineAnch, mc=None,aNum=0):
         if arrayMooring:
             # get mooring line info for all lines
             for j in range(0, len(arrayMooring)): # run through each line
-                # get configuration for that line
-                lineconfig = arrayMooring[j]['MooringConfigID']
-                # create mooring and connector dictionary for that line
-                m_config = getMoorings(lineconfig)
 
                 PFNum = [] # platform ID(s) connected to the mooring line
@@ -585,6 +589,7 @@ def getAnchors(lineAnch, mc=None,aNum=0):
                     for k in range(0, len(arrayInfo)):
                         if arrayInfo[k]['ID'] == PFNum[0]:
                             rowB = arrayInfo[k]
+                            Bnum = k
                         elif arrayInfo[k]['ID'] == PFNum[1]:
                             rowA = arrayInfo[k]
                     # get headings (mooring heading combined with platform heading)
@@ -593,9 +598,15 @@ def getAnchors(lineAnch, mc=None,aNum=0):
                     # calculate fairlead locations (can't use reposition method because both ends need separate repositioning)
                     Aloc = [rowA['x_location']+np.cos(headingA)*self.platformList[PFNum[1]].rFair, rowA['y_location']+np.sin(headingA)*self.platformList[PFNum[1]].rFair, self.platformList[PFNum[1]].zFair]
                     Bloc = [rowB['x_location']+np.cos(headingB)*self.platformList[PFNum[0]].rFair, rowB['y_location']+np.sin(headingB)*self.platformList[PFNum[0]].rFair, self.platformList[PFNum[0]].zFair]
+                    # get configuration for the line
+                    lineconfig = arrayMooring[j]['MooringConfigID']
+                    # create mooring and connector dictionary for that line
+                    m_config = getMoorings(lineconfig, Bnum)
                     # create mooring class instance
-                    mc = (Mooring(dd=m_config, rA=Aloc, rB=Bloc, rad_anch=m_config['span'], z_anch=m_config['zAnchor'],id=(PFNum[0],PFNum[1],mct)))
+                    mc = (Mooring(dd=m_config, id=(PFNum[0],PFNum[1],mct)))
                     mc.shared = 1
+                    mc.rA = Aloc
+                    mc.rB = Bloc
                     # add mooring object to project mooring list
                     self.mooringList[(PFNum[0],PFNum[1],mct)] = mc
                     # attach mooring object to platforms
@@ -610,11 +621,17 @@ def getAnchors(lineAnch, mc=None,aNum=0):
                 elif any(ids['ID'] == arrayMooring[j]['end A'] for ids in arrayAnchor): # end A is an anchor
                     # get ID of platform connected to line
                     PFNum.append(arrayMooring[j]['end B'])
+                    for k in range(0, len(arrayInfo)):
+                        if arrayInfo[k]['ID'] == PFNum[0]:
+                            Bnum = k
+                    # get configuration for that line
+                    lineconfig = arrayMooring[j]['MooringConfigID']
+                    # create mooring and connector dictionary for that line
+                    m_config = getMoorings(lineconfig, Bnum)
                     # create mooring class instance
-                    mc = (Mooring(dd=m_config, rA=[m_config['span'],0,m_config['zAnchor']],
-                                  rB=[self.platformList[PFNum[0]].rFair,0,self.platformList[PFNum[0]].zFair],
-                                  rad_anch=m_config['span'], rad_fair=self.platformList[PFNum[0]].rFair,
-                                  z_anch=m_config['zAnchor'], z_fair=self.platformList[PFNum[0]].zFair,id=(PFNum[0],mct)))
+                    mc = (Mooring(dd=m_config, id=(PFNum[0],mct)))
+                    mc.rA = [m_config['span']+self.platformList[PFNum[0]].rFair,0,m_config['zAnchor']]
+                    mc.rB = [self.platformList[PFNum[0]].rFair,0,self.platformList[PFNum[0]].zFair]
                     # adjust end positions based on platform location and mooring and platform heading
                     mc.reposition(r_center=self.platformList[PFNum[0]].r, heading=np.radians(arrayMooring[j]['headingB'])+self.platformList[PFNum[0]].phi, project=self)
@@ -1385,7 +1402,7 @@ def addCablesConnections(self,connDict):
 
             dd = {}
             cableType = connDict['props'][i][j] # running on the assumption that each entry in props has the equivalent of a design dictionary
             # may need to add here to get the properties properly put in the design dictionary
-            
+            cable = cableType['name']
             # create cable object
             self.cableList[(cable,i*j)] = SubseaCable((cable,i*j),dd=dd)
             # attach to platforms/substations
@@ -2087,6 +2104,19 @@ def getMarineGrowth(self,mgDict_start,lines='all',tol=2):
             # assign the newly created subsystem into the right place in the line list
             self.ms.lineList[ii] = self.mooringList[i].subsystem
 
+    def updateFailureProbability(self):
+        '''
+        Function to populate (or update) the failure probability dictionaries in each
+        object, based on failure probability calculations developed by Emma Slack.
+
+        To be filled in...
+
+        Returns
+        -------
+        None.
+
+        '''
+
 def getFromDict(dict, key, shape=0, dtype=float, default=None, index=None):
     '''
     Function to streamline getting values from design dictionary from YAML file, including error checking.
diff --git a/famodel/substation/substation.py b/famodel/substation/substation.py
index a4174085..e15bd078 100644
--- a/famodel/substation/substation.py
+++ b/famodel/substation/substation.py
@@ -22,4 +22,7 @@ def __init__(self, dd, id):
         self.zFair = None
         self.point = None
 
+        # dictionary of failure probability
+        self.failure_probability = {}
+
         # further functionality to be added later
\ No newline at end of file
diff --git a/famodel/turbine/turbine.py b/famodel/turbine/turbine.py
index d242a7b7..0ae4aee1 100644
--- a/famodel/turbine/turbine.py
+++ b/famodel/turbine/turbine.py
@@ -30,6 +30,7 @@ def __init__(self, dd):
         self.loads = {}
         self.reliability = {}
         self.cost = {}
+        self.failure_probability = {}
 
     def makeRotor(self):
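A note on the heading conventions changed in `Mooring.reposition` above: `self.heading` is now
stored in degrees using the compass convention (0 at +y/North, increasing clockwise), and is
converted to the x-y math convention only when forming the heading unit vector. A standalone
sketch of that conversion (the function name here is hypothetical, not part of the codebase):

```python
import numpy as np

def heading_unit_vector(heading_deg):
    """Compass heading in degrees (0 = North/+y, 90 = East/+x) -> 2D unit vector,
    mirroring the phi = np.radians(90 - self.heading) step in Mooring.reposition."""
    phi = np.radians(90 - heading_deg)  # compass -> counterclockwise-from-+x convention
    return np.array([np.cos(phi), np.sin(phi)])

print(heading_unit_vector(0))    # ~[0., 1.]   (north)
print(heading_unit_vector(90))   # ~[1., 0.]   (east)
print(heading_unit_vector(180))  # ~[0., -1.]  (south)
```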