import os
import csv
import gradio as gr
import tensorflow as tf
import numpy as np
import pandas as pd
from datetime import datetime
import utils
from huggingface_hub import Repository
import itertools
import GPyOpt
# Load access tokens
WRITE_TOKEN = os.environ.get("WRITE_PER")  # write token
# Logs repo path
dataset_url = "https://huggingface.co/datasets/sandl/upload_alloy_hardness"
dataset_path = "logs_alloy_hardness.csv"
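# Min/max bounds (presumably taken from the training data) used to min-max scale the
# continuous features and to map the normalized hardness/yield-strength outputs back to HV/MPa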
scaling_factors = {'PROPERTY: Calculated Density (g/cm$^3$)': (5.5, 13.7),
                   'PROPERTY: Calculated Young modulus (GPa)': (77.0, 336.0),
                   'PROPERTY: HV': (107.0, 1183.0),
                   'PROPERTY: YS (MPa)': (62.0, 3416.0)}
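# Integer codes for each categorical property; these categories are later expanded
# into one-hot columns via utils.turn_into_one_hot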
input_mapping = {'PROPERTY: BCC/FCC/other': {'BCC': 0, 'FCC': 1, 'OTHER': 2},#, 'nan': 2},
                 'PROPERTY: Processing method': {'ANNEAL': 0, 'CAST': 1, 'OTHER': 2, 'POWDER': 3, 'WROUGHT': 4},#, 'nan': 2},
                 'PROPERTY: Microstructure': {'B2': 0, 'B2+BCC': 1, 'B2+L12': 2, 'B2+Laves+Sec.': 3, 'B2+Sec.': 4, 'BCC': 5,
                                              'BCC+B2': 6, 'BCC+B2+FCC': 7, 'BCC+B2+FCC+Sec.': 8, 'BCC+B2+L12': 9, 'BCC+B2+Laves': 10,
                                              'BCC+B2+Sec.': 11, 'BCC+BCC': 12, 'BCC+BCC+HCP': 13, 'BCC+BCC+Laves': 14,
                                              'BCC+BCC+Laves(C14)': 15, 'BCC+BCC+Laves(C15)': 16, 'BCC+FCC': 17, 'BCC+HCP': 18,
                                              'BCC+Laves': 19, 'BCC+Laves(C14)': 20, 'BCC+Laves(C15)': 21, 'BCC+Laves+Sec.': 22,
                                              'BCC+Sec.': 23, 'FCC': 24, 'FCC+B2': 25, 'FCC+B2+Sec.': 26, 'FCC+BCC': 27,
                                              'FCC+BCC+B2': 28, 'FCC+BCC+B2+Sec.': 29, 'FCC+BCC+BCC': 30, 'FCC+BCC+Sec.': 31,
                                              'FCC+FCC': 32, 'FCC+HCP': 33, 'FCC+HCP+Sec.': 34, 'FCC+L12': 35, 'FCC+L12+B2': 36,
                                              'FCC+L12+Sec.': 37, 'FCC+Laves': 38, 'FCC+Laves(C14)': 39, 'FCC+Laves+Sec.': 40,
                                              'FCC+Sec.': 41, 'L12+B2': 42, 'Laves(C14)+Sec.': 43, 'OTHER': 44},#, 'nan': 44},
                 'PROPERTY: Single/Multiphase': {'': 0, 'M': 1, 'S': 2, 'OTHER': 3}}#, 'nan': 3}}
# Unique phase elements
unique_phase_elements = ['B2', 'BCC', 'FCC', 'HCP', 'L12', 'Laves', 'Laves(C14)', 'Laves(C15)', 'Sec.', 'OTHER']
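# Descriptions of each model input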
input_cols = {
    "PROPERTY: Alloy formula": "(PROPERTY: Alloy formula) "
                               "Enter alloy formula using proportions representation (i.e. Al0.25 Co1 Fe1 Ni1)",
    "PROPERTY: Single/Multiphase": "(PROPERTY: Single/Multiphase) "
                                   "Choose between Single (S), Multiphase (M) and other (OTHER)",
    "PROPERTY: BCC/FCC/other": "(PROPERTY: BCC/FCC/other) "
                               "Choose between BCC, FCC and other ",
    "PROPERTY: Processing method": "(PROPERTY: Processing method) "
                                   "Choose your processing method (ANNEAL, CAST, POWDER, WROUGHT or OTHER)",
    "PROPERTY: Microstructure": "(PROPERTY: Microstructure) "
                                "Choose the microstructure (SEC means the secondary/tertiary microstructure is not one of FCC, BCC, HCP, L12, B2, Laves, Laves (C14), Laves (C15))",
}
def process_microstructure(list_phases):
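    """
    Return the canonical microstructure key for a set of phases by trying every ordering
    of the selected phases against the keys of input_mapping['PROPERTY: Microstructure'];
    falls back to 'OTHER' if no permutation matches.
    """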
    permutations = list(itertools.permutations(list_phases))
    permutations_strings = [str('+'.join(list(e))) for e in permutations]
    for e in permutations_strings:
        if e in list(input_mapping['PROPERTY: Microstructure'].keys()):
            return e
    return 'OTHER'
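
# Example (illustrative): process_microstructure(['B2', 'BCC']) returns 'B2+BCC',
# since that permutation is a key of input_mapping['PROPERTY: Microstructure'].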
    
def write_logs(message, message_type="Prediction"):
    """
    Write usage logs to the Hugging Face dataset repo (currently disabled).
    """
    # with Repository(local_dir="data", clone_from=dataset_url, use_auth_token=WRITE_TOKEN).commit(commit_message="from private", blocking=False):
    #     with open(dataset_path, "a") as csvfile:
    #         writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
    #         writer.writerow(
    #             {"name": message_type, "message": message, "time": str(datetime.now())}
    #         )
    return
    
def predict(x, request: gr.Request):
    """
    Predict the hardness and yield strength using the ML model. Input data is a dataframe
    """
    loaded_model = tf.keras.models.load_model("hardness.h5")
    print("summary is", loaded_model.summary())
    #x = x.replace("", 0)
    x = np.asarray(x).astype("float32")
    y = loaded_model.predict(x)
    y_hardness = y[0][0]
    y_ys = y[0][1]
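    # Min/max bounds used to normalize hardness (HV) and yield strength (MPa)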
    minimum_hardness, maximum_hardness = scaling_factors['PROPERTY: HV']
    minimum_ys, maximum_ys = scaling_factors['PROPERTY: YS (MPa)']
    print("Prediction is ", y)
    if request is not None:   # Verify if request is not None (when building the app the first request is None)
        message = f"{request.username}_{request.client.host}"
        print("MESSAGE")
        print(message)
        res = write_logs(message)
    #interpret_fig = utils.interpret(x)
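    # Map the [0, 1] predictions back to physical HV and MPa values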
    return (round(y_hardness*(maximum_hardness-minimum_hardness)+minimum_hardness, 2), 12,
            round(y_ys*(maximum_ys-minimum_ys)+minimum_ys, 2), 12)
def predict_from_tuple(in1, in2, in3, in4, in5, request: gr.Request):
    """
    Predict the hardness using the ML model. Input data is a tuple. Input order should be the same as the cols list
    """
    input_tuple = (in1, in2, in3, in4, in5)
    formula = utils.normalize_and_alphabetize_formula(in1)
    density = utils.calculate_density(formula)
    young_modulus = utils.calculate_youngs_modulus(formula)
    input_dict = {}
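    # Encode the categorical inputs as the integer codes defined in input_mapping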
    in2 = input_mapping['PROPERTY: Single/Multiphase'][str(in2)]
    input_dict['PROPERTY: Single/Multiphase'] = [int(in2)]
    
    in3 = input_mapping['PROPERTY: BCC/FCC/other'][str(in3)]
    input_dict['PROPERTY: BCC/FCC/other'] = [int(in3)]
    
    in4 = input_mapping['PROPERTY: Processing method'][str(in4)]
    input_dict['PROPERTY: Processing method'] = [int(in4)]
    in5 = process_microstructure(in5)
    in5 = input_mapping['PROPERTY: Microstructure'][in5]
    input_dict['PROPERTY: Microstructure'] = [int(in5)]
    
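    # Min-max scale the computed density and Young's modulus using scaling_factors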
    density_scaling_factors = scaling_factors['PROPERTY: Calculated Density (g/cm$^3$)']
    density = (density-density_scaling_factors[0])/(
        density_scaling_factors[1]-density_scaling_factors[0])
    input_dict['PROPERTY: Calculated Density (g/cm$^3$)'] = [float(density)]
    
    ym_scaling_factors = scaling_factors['PROPERTY: Calculated Young modulus (GPa)']
    young_modulus = (young_modulus-ym_scaling_factors[0])/(
        ym_scaling_factors[1]-ym_scaling_factors[0])
    input_dict['PROPERTY: Calculated Young modulus (GPa)'] = [float(young_modulus)]
    input_df = pd.DataFrame.from_dict(input_dict)
    one_hot = utils.turn_into_one_hot(input_df, input_mapping)
    print("One hot columns are ", one_hot.columns)
    return predict(one_hot, request)
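
# Illustrative call (demonstration only; requires the utils helpers and hardness.h5):
# predict_from_tuple("Al0.25 Co1 Fe1 Ni1", "M", "BCC", "CAST", ["BCC", "B2"], request=None)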
def fit_outputs_constraints(x, hardness_target, ys_target, request: gr.Request):
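    """
    Objective for the Bayesian optimization: sum of the absolute errors between the
    predicted hardness/yield strength and the requested targets (lower is better).
    """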
    predictions = predict(x, request)
    error_hardness = np.sqrt(np.square(predictions[0]-float(hardness_target)))
    error_ys = np.sqrt(np.square(predictions[2]-float(ys_target)))
    print("Optimization step is ", predictions, float(hardness_target), float(ys_target),
          error_hardness, error_ys)
    return error_hardness + error_ys
def predict_inverse(hardness_target, ys_target, formula, request: gr.Request):
    one_hot_columns = utils.return_feature_names()
    continuous_variables = ['PROPERTY: Calculated Density (g/cm$^3$)',
                         'PROPERTY: Calculated Young modulus (GPa)']
    categorical_variables = list(one_hot_columns)
    for c in continuous_variables:
        categorical_variables.remove(c)
    fixed_density = utils.calculate_density(str(formula))
    fixed_ym = utils.calculate_youngs_modulus(str(formula))
    
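    # Build the GPyOpt search domain: the density and Young's modulus features are pinned
    # to the values computed from the given formula, while every one-hot categorical
    # feature may take the value 0 or 1.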
    domain = []
    for c in one_hot_columns:
        if c in continuous_variables:
            if c == continuous_variables[0]:
                domain_density = (fixed_density-scaling_factors[c][0])/(
                    scaling_factors[c][1]-scaling_factors[c][0])
                domain.append({'name': str(c), 'type': 'continuous', 'domain': (domain_density, domain_density)})#(0.,1.)})
            else:
                domain_ym = (fixed_ym-scaling_factors[c][0])/(
                    scaling_factors[c][1]-scaling_factors[c][0])
                domain.append({'name': str(c), 'type': 'continuous', 'domain': (domain_ym, domain_ym)})#(0.,1.)})
        else:
            domain.append({'name': str(c), 'type': 'discrete', 'domain': (0,1)})
    print("Domain is ", domain)
    constraints = []
    constrained_columns = ['Single/Multiphase', 'Preprocessing method', 'BCC/FCC/other']#, 'Microstructure']
    for constraint in constrained_columns:
        sum_string = ''
        for i in range (len(one_hot_columns)):
            column_one_hot = one_hot_columns[i]
            if column_one_hot.startswith(constraint):
                sum_string = sum_string+"+x[:," + str(i) + "]"
        constraints.append({'name': constraint + "+1", 'constraint': sum_string + '-1'})
        constraints.append({'name': constraint + "-1", 'constraint': '-1*(' + sum_string + ')+1'})
    def fit_outputs(x):
        return fit_outputs_constraints(x, hardness_target, ys_target, request)
    opt = GPyOpt.methods.BayesianOptimization(f = fit_outputs,            # function to optimize       
                                              domain = domain,        # box-constraints of the problem
                                              constraints = constraints,
                                              acquisition_type ='LCB',       # LCB acquisition
                                              acquisition_weight = 0.1)   # Exploration exploitation
    # it may take a few seconds
    opt.run_optimization(max_iter=20)
    opt.plot_convergence()
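    # Best candidate: the evaluated point with the lowest objective value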
    x_best = opt.X[np.argmin(opt.Y)]
    best_params = dict(zip(
        [el['name'] for el in domain],
        [[x] for x in x_best]))
    optimized_x = pd.DataFrame.from_dict(best_params)
    #for c in optimized_x.columns:
     #   if c in continuous_variables:
      #      optimized_x[c]=optimized_x[c]*(scaling_factors[c][1]-scaling_factors[c][0])+scaling_factors[c][0]
    optimized_x = optimized_x[['PROPERTY: Calculated Density (g/cm$^3$)',
                               'PROPERTY: Calculated Young modulus (GPa)',
                               'Preprocessing method ANNEAL',
                               'Preprocessing method CAST', 'Preprocessing method OTHER',
                               'Preprocessing method POWDER', 'Preprocessing method WROUGHT',
                               'BCC/FCC/other BCC', 'BCC/FCC/other FCC', 'BCC/FCC/other OTHER',
                               'Single/Multiphase ', 'Single/Multiphase M', 'Single/Multiphase S']]
    result = optimized_x
    result = result[result>0.0].dropna(axis=1)
    return list(result.keys())[2:]
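
# Illustrative call (demonstration only; requires the utils helpers and hardness.h5):
# predict_inverse(820, 1800, "Al0.25 Co1 Fe1 Ni1", request=None) returns the names of the
# selected one-hot columns, e.g. a processing method, a phase and a single/multiphase flag.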
example_inputs = ["Al0.25 Co1 Fe1 Ni1", 820, 1800]
css_styling = """#submit {background: #1eccd8} 
#submit:hover {background: #a2f1f6} 
.output-image, .input-image, .image-preview {height: 250px !important}
.output-plot {height: 250px !important}"""
light_theme_colors = gr.themes.Color(c50="#e4f3fa", # Dataframe background cell content - light mode only
                                c100="#e4f3fa", # Top corner of clear button in light mode + markdown text in dark mode
                                c200="#a1c6db", # Component borders
                                c300="#FFFFFF", # 
                                c400="#e4f3fa", # Footer text
                                c500="#0c1538", # Text of component headers in light mode only
                                c600="#a1c6db", # Top corner of button in dark mode
                                c700="#475383", # Button text in light mode + component borders in dark mode
                                c800="#0c1538", # Markdown text in light mode
                                c900="#a1c6db", # Background of dataframe - dark mode
                                c950="#0c1538") # Background in dark mode only
# secondary color used for highlight box content when typing in light mode, and download option in dark mode
# primary color used for login button in dark mode
osium_theme = gr.themes.Default(primary_hue="cyan", secondary_hue="cyan", neutral_hue=light_theme_colors)
page_title = "Alloy hardness and yield strength prediction"
favicon_path = "osiumai_favicon.ico"
logo_path  = "osiumai_logo.jpg"
html = f""" 
 """
with gr.Blocks(css=css_styling, title=page_title, theme=osium_theme) as demo:
    #gr.HTML(html)
    gr.Markdown("# 
Get optimal alloy recommendations based on your target performance
") gr.Markdown("This AI model provides a recommended alloy formula, microstructure and processing conditions based on your target hardness and yield strength") with gr.Row(): clear_button = gr.Button("Clear") prediction_button = gr.Button("Predict", elem_id="submit") with gr.Row(): with gr.Column(): gr.Markdown("### Your alloy formula") formula = gr.Text(label = "Alloy formula") gr.Markdown("### The target performance of your alloy") input_hardness = gr.Text(label="Enter your target hardness (in HV)") input_yield_strength = gr.Text(label="Enter your target yield strength (MPa)") with gr.Column(): with gr.Row(): with gr.Column(): gr.Markdown("### Your optimal microstructure and processing conditions") #optimal_parameters = gr.DataFrame(label="Optimal parameters", wrap=True) with gr.Column(): param1 = gr.Text(label="Processing method") with gr.Column(): param2 = gr.Text(label="Microstructure") with gr.Column(): param3 = gr.Text(label="Phase") #with gr.Row(): #with gr.Column(): #with gr.Row(): # gr.Markdown("### Interpretation of hardness prediction") # gr.Markdown("### Interpretation of yield strength prediction") #with gr.Row(): # output_interpretation = gr.Plot(label="Interpretation") with gr.Row(): gr.Examples([example_inputs], [formula, input_hardness, input_yield_strength]) prediction_button.click( fn=predict_inverse, inputs=[input_hardness, input_yield_strength, formula], outputs=[ param1, param2, param3, ], show_progress=True, ) clear_button.click( lambda x: [gr.update(value=None)] * 6, [], [ param1, param2, param3, input_hardness, input_yield_strength, formula ], ) if __name__ == "__main__": demo.queue(concurrency_count=2) demo.launch()