simone-papicchio committed
Commit 9aa07eb · 1 Parent(s): fc2145b

fix: pickle read with wrong func

Files changed (2)
  1. app.py +4 -5
  2. utilities.py +1 -23
app.py CHANGED
@@ -28,7 +28,7 @@ import utilities as us
 # def wrapper(*args, **kwargs):
 # return func(*args, **kwargs)
 # return wrapper
-pnp_path = os.path.join(".", "evaluation_p_np_metrics.csv")
+pnp_path = "evaluation_p_np_metrics.csv"
 
 js_func = """
 function refresh() {
@@ -50,8 +50,7 @@ df_default = pd.DataFrame({
 'Age': [25, 30, 35],
 'City': ['New York', 'Los Angeles', 'Chicago']
 })
-models_path = os.path.join(".", "models.csv")
-#models_path = "./models.csv"
+models_path ="models.csv"
 
 # Global variable to keep track of the current data
 df_current = df_default.copy()
@@ -82,7 +81,7 @@ def load_data(file, path, use_default):
 input_data["input_method"] = 'uploaded_file'
 input_data["db_name"] = os.path.splitext(os.path.basename(file))[0]
 #input_data["data_path"] = os.path.join(".", "data", "data_interface",f"{input_data['db_name']}.sqlite")
-input_data["data_path"] = os.path.join(".", f"{input_data['db_name']}.sqlite")
+input_data["data_path"] = f"{input_data['db_name']}.sqlite"
 input_data["data"] = us.load_data(file, input_data["db_name"])
 df_current = input_data["data"]['data_frames'].get('MyTable', df_default) # Load the DataFrame
 if(input_data["data"]['data_frames'] and input_data["data"]["db"] is None): #for csv and xlsx files
@@ -130,7 +129,7 @@ def load_data(file, path, use_default):
 #input_data["data_path"] = os.path.join(".", "data", "spider_databases", "defeault.sqlite")
 #input_data["db_name"] = "default"
 #input_data["data"]['db'] = SqliteConnector(relative_db_path=input_data["data_path"], db_name=input_data["db_name"])
-input_data["data"]['data_frames'] = us.extract_tables_dict(pnp_path)
+input_data["data"]['data_frames'] = us.load_tables_dict_from_pkl('tables_dict.pkl')
 return input_data["data"]['data_frames']
 
 selected_inputs = sum([file is not None, bool(path), use_default])
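The new call us.load_tables_dict_from_pkl('tables_dict.pkl') expects tables_dict.pkl to hold a pickled dict mapping table names to pandas DataFrames. A minimal sketch of that round trip, assuming a hypothetical writer side (only the reader is part of this commit):

import pickle

import pandas as pd

# Hypothetical writer side (assumption): serialize a dict of table name -> DataFrame.
tables_dict = {"MyTable": pd.DataFrame({"Name": ["Alice", "Bob"], "Age": [25, 30]})}
with open("tables_dict.pkl", "wb") as f:
    pickle.dump(tables_dict, f)

# Reader side, mirroring what load_tables_dict_from_pkl() does in utilities.py.
with open("tables_dict.pkl", "rb") as f:
    data_frames = pickle.load(f)

print(data_frames["MyTable"])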
utilities.py CHANGED
@@ -108,28 +108,6 @@ def generate_some_samples(connector, tbl_name):
 samples.append(f"Error: {e}")
 return samples
 
-def load_tables_dict(file_path):
+def load_tables_dict_from_pkl(file_path):
 with open(file_path, 'rb') as f:
 return pickle.load(f)
-
-def extract_tables_dict(pnp_path):
-tables_dict = {}
-with open(pnp_path, mode='r', encoding='utf-8') as file:
-reader = csv.DictReader(file)
-tbl_db_pairs = set() # Use a set to avoid duplicates
-for row in reader:
-tbl_name = row.get("tbl_name")
-db_path = row.get("db_path")
-if tbl_name and db_path:
-tbl_db_pairs.add((tbl_name, db_path)) # Add the pair to the set
-for tbl_name, db_path in list(tbl_db_pairs):
-if tbl_name and db_path:
-connector = sqlite3.connect(db_path)
-query = f"SELECT * FROM {tbl_name} LIMIT 5"
-try:
-df = pd.read_sql_query(query, connector)
-tables_dict[tbl_name] = df
-except Exception as e:
-tables_dict[tbl_name] = pd.DataFrame({"Error": [str(e)]}) # DataFrame with the error message
-return load_tables_dict('tables_dict.csv')
-return tables_dict
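For context, the removed extract_tables_dict() ended with load_tables_dict('tables_dict.csv'), i.e. pickle.load() on a plain-text CSV, which appears to be the "pickle read with wrong func" the commit title refers to. A small sketch of that failure mode, using a throwaway file name:

import pickle

# Write a tiny CSV (throwaway example), then try to read it with the pickle loader,
# as the removed code path effectively did.
with open("tables_dict.csv", "w", encoding="utf-8") as f:
    f.write("tbl_name,db_path\nMyTable,./example.sqlite\n")

try:
    with open("tables_dict.csv", "rb") as f:
        pickle.load(f)
except Exception as e:  # typically pickle.UnpicklingError
    print(f"pickle.load() on a CSV fails: {type(e).__name__}: {e}")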