Some changes

master
Sebastian Serfling 2025-10-31 18:33:40 +01:00
parent 4b40b177c3
commit 12058896c3
6 changed files with 143 additions and 76 deletions

8
.env
View File

@ -1,5 +1,9 @@
### MSSQL-Server Adresse
MSSQL_CONNECTION_STR="Driver={SQL Server};Server=DMS;Database=Waage_Steinbruch;UID=waage;PWD=!LKWWaage2025#;"
## Test Datenbank
MSSQL_CONNECTION_STR="Driver={SQL Server};Server=172.17.1.25;Database=balzerwaagenpc;UID=sa;PWD=adm.3dfx12;"
## LIVE Datenbank
#MSSQL_CONNECTION_STR="Driver={SQL Server};Server=DMS;Database=Waage_Steinbruch;UID=waage;PWD=!LKWWaage2025#;"
### Mail Einstellungen
E_MAIL_ADDRESS="serfling@itdata-gera.de"
@ -11,7 +15,7 @@ E_MAIL_SEND_TO="serfling@itdata-gera.de"
### SDF Einstellungen für MSSQL Importer/Exporter
#SDF_LOCAL_PFAD="D:/Waagen-PC/"
## Test Pfad
SDF_LOCAL_PFAD="C:/Users/Sebastian Serfling/PycharmProjects/Balzer-WaagenDaten/"
SDF_LOCAL_PFAD="C:/Users/Sebastian Serfling/PycharmProjects/Balzer-WaagenDaten"
SDF_NAME="App.sdf"
### Log Einstellungen für MSSQL Importer/Exporter

BIN
App.sdf

Binary file not shown.

Binary file not shown.

View File

@ -1,8 +1,8 @@
import os
import traceback
import datetime
import adodbapi
import pyodbc
import adodbapi
from dotenv import load_dotenv
import mail
@ -20,21 +20,23 @@ logfile_path = os.path.join(LOG_DIR, logfile_name)
process = "SDF_to_MSSQL_Export"
mssql_cursor_global = None
global_error_log = [] # ❗ hier sammeln wir alle Fehler aus allen Tabellen
# =========================
# LOGGING
# LOGGING-FUNKTIONEN
# =========================
def write_log(line: str):
    """Append *line* plus a timestamp separator to the logfile and echo it.

    The console copy is stripped to ASCII because the Windows console
    cannot always render the emoji used in the log messages; the file
    (opened as UTF-8) keeps the original text.
    """
    stamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    entry = f"{line}\n------------------------------{stamp}------------------------------"
    printable = line.encode('ascii', errors='ignore').decode()
    print(printable)
    with open(logfile_path, "a", encoding="utf-8") as log_file:
        log_file.write(entry + "\n")
def write_log_summary(table, inserted, skipped, errors, inserted_rows):
"""Schreibt kurze Zusammenfassung in MSSQL-logs."""
"""Schreibt eine kurze Zusammenfassung pro Tabelle in MSSQL-logs"""
if not mssql_cursor_global:
return
try:
@ -44,10 +46,10 @@ def write_log_summary(table, inserted, skipped, errors, inserted_rows):
message = f"{inserted} eingefügt, {skipped} übersprungen, {errors} Fehler"
if inserted_rows:
message += " | " + "; ".join(inserted_rows[:5])
msg = f"Tabelle {table}: {message}"
full_message = f"Tabelle {table}: {message}"
mssql_cursor_global.execute(
"INSERT INTO logs (timestamp, message, process) VALUES (?, ?, ?)",
datetime.datetime.now(), msg, process
datetime.datetime.now(), full_message, process
)
mssql_cursor_global.connection.commit()
except Exception as e:
@ -68,12 +70,7 @@ if not MSSQL_CONNECTION_STR:
mail.send_error_email(msg, process)
exit(1)
sdf_connection_str = (
"Provider=Microsoft.SQLSERVER.CE.OLEDB.3.5;"
f"Data Source={sdf_file};"
"Persist Security Info=False;"
)
# Tabellen aus .env
tables_env = os.getenv("TABLES", "")
tables = [t.strip() for t in tables_env.split(",") if t.strip()]
@ -82,7 +79,7 @@ tables = [t.strip() for t in tables_env.split(",") if t.strip()]
# HELFER
# =========================
def get_pk_columns(mssql_cursor, table_name):
query = """
pk_query = """
SELECT KU.COLUMN_NAME
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS TC
INNER JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KU
@ -91,7 +88,7 @@ def get_pk_columns(mssql_cursor, table_name):
AND KU.TABLE_NAME = ?
ORDER BY KU.ORDINAL_POSITION;
"""
mssql_cursor.execute(query, (table_name,))
mssql_cursor.execute(pk_query, (table_name,))
return [row[0] for row in mssql_cursor.fetchall()]
@ -109,7 +106,9 @@ def row_summary(columns, row):
def main():
global mssql_cursor_global
try:
# --- MSSQL ---
# ========================
# MSSQL-Verbindung
# ========================
try:
mssql_conn = pyodbc.connect(MSSQL_CONNECTION_STR)
mssql_cursor = mssql_conn.cursor()
@ -119,11 +118,15 @@ def main():
err_msg = f"❌ Fehler bei der Verbindung zu MSSQL: {mssql_err}"
write_log(err_msg)
mail.send_error_email(err_msg, process)
#return # abbrechen
return
# --- SDF ---
# ========================
# SDF-Verbindung
# ========================
try:
sdf_conn = adodbapi.connect(sdf_connection_str)
sdf_conn = adodbapi.connect(
f"Provider=Microsoft.SQLSERVER.CE.OLEDB.3.5;Data Source={sdf_file};Persist Security Info=False;"
)
sdf_cursor = sdf_conn.cursor()
write_log("✅ Verbindung zur SDF erfolgreich geöffnet.")
except Exception as sdf_err:
@ -137,15 +140,19 @@ def main():
# ============ Tabellen-Export ============
for table in tables:
write_log(f"\n🔹 Verarbeite Tabelle: {table}")
error_messages = []
try:
sdf_cursor.execute(f"SELECT * FROM [{table}]")
columns = [col[0] for col in sdf_cursor.description]
columns = [desc[0] for desc in sdf_cursor.description]
write_log(f"Spalten in {table}: {columns}")
except Exception as e:
write_log(f"❌ Fehler beim Lesen der Tabelle {table}: {e}")
msg = f"❌ Fehler beim Lesen der SDF-Tabelle {table}: {e}"
write_log(msg)
global_error_log.append(msg)
continue
# Primärschlüssel aus MSSQL
pk_columns = get_pk_columns(mssql_cursor, table)
if pk_columns:
write_log(f"Primary Key(s) in {table}: {pk_columns}")
@ -163,8 +170,6 @@ def main():
inserted_rows = []
rows = sdf_cursor.fetchall()
write_log(f"{len(rows)} Datensätze in {table} gefunden.")
for row in rows:
try:
if pk_indices:
@ -176,7 +181,9 @@ def main():
skipped += 1
continue
mssql_cursor.execute(insert_sql, row)
# NULL-Behandlung
row = [None if v in (None, '', 'NULL') else v for v in row]
mssql_cursor.execute(insert_sql, tuple(row))
inserted += 1
if len(inserted_rows) < 5:
inserted_rows.append(row_summary(columns, row))
@ -185,20 +192,26 @@ def main():
errors += 1
error_details = f"Fehler beim Einfügen in {table}: {insert_err}"
write_log(error_details)
mail.send_error_email(error_details, process)
error_messages.append(error_details)
mssql_conn.commit()
sdf_conn.commit()
# Tabelle fertig → loggen
write_log(f"{table}: {inserted} eingefügt, {skipped} übersprungen, {errors} Fehler.")
write_log_summary(table, inserted, skipped, errors, inserted_rows)
report_lines.append(f"{table}: {inserted} eingefügt, {skipped} übersprungen, {errors} Fehler.")
# --- .export-Marker ---
export_marker_path = os.path.join(MAIN_DIR, ".export")
with open(export_marker_path, "w"):
pass
write_log(f"Leere .export-Datei erstellt: {export_marker_path}")
if error_messages:
global_error_log.append(f"\nTabelle {table}:\n" + "\n".join(error_messages[:20]))
# --- Abschlusslog ---
# --- Abschluss ---
marker_path = os.path.join(MAIN_DIR, ".export")
with open(marker_path, "w"):
pass
write_log(f"Leere .export-Datei erstellt: {marker_path}")
# Logeintrag in MSSQL
try:
mssql_cursor.execute(
"INSERT INTO logs (timestamp, message, process) VALUES (?, ?, ?)",
@ -213,6 +226,18 @@ def main():
mssql_cursor.close()
mssql_conn.close()
# --- Sammelmail am Ende ---
if global_error_log:
combined_errors = "\n\n".join(global_error_log)
mail.send_error_email(
f"⚠️ Export abgeschlossen mit Fehlern:\n\n{combined_errors}",
process
)
else:
mail.send_success_email("✅ Export erfolgreich abgeschlossen keine Fehler.", process)
write_log("🏁 Exportprozess beendet.")
except Exception as e:
err = f"Allgemeiner Fehler: {e}\n{traceback.format_exc()}"
write_log(err)

View File

@ -47,3 +47,9 @@ def send_report_email(msg: str, process: str):
subject = f"✅ Prozess abgeschlossen: {process}"
body = f"Prozess: {process}\nZeit: {datetime.datetime.now()}\n\nErgebnisbericht:\n{msg}"
send_email(subject, body)
def send_success_email(msg: str, process: str):
    """Send a success notification e-mail for *process*.

    Fix: the original accepted *msg* but discarded it and always sent the
    static body "Alles OK!". The caller-supplied message is now included,
    mirroring the body format of the sibling send_report_email helper.
    """
    subject = f"✅ Erfolg: {process}"
    body = f"Prozess: {process}\nZeit: {datetime.datetime.now()}\n\n{msg}"
    send_email(subject, body)

View File

@ -1,60 +1,92 @@
import adodbapi
import csv
import os
import adodbapi
import pyodbc
from dotenv import load_dotenv
sdf_file = r"C:\Users\Sebastian Serfling\PycharmProjects\Balzer-WaagenDaten\App.sdf"
output_csv = os.path.join(os.path.dirname(sdf_file), "Weighing_LDB_recovery.csv")
load_dotenv()
print(f"📂 Versuche Wiederherstellung aus: {sdf_file}")
# --- ENV Variablen ---
SDF_LOCAL_PFAD = os.getenv("SDF_LOCAL_PFAD")
SDF_NAME = os.getenv("SDF_NAME", "App.sdf")
sdf_file = os.path.join(SDF_LOCAL_PFAD, SDF_NAME)
MSSQL_CONNECTION_STR = os.getenv("MSSQL_CONNECTION_STR")
tables = [t.strip() for t in os.getenv("TABLES", "").split(",") if t.strip()]
conn_str = (
"Provider=Microsoft.SQLSERVER.CE.OLEDB.3.5;"
f"Data Source={sdf_file};Persist Security Info=False;"
print(f"📂 Verbinde mit SDF: {sdf_file}")
sdf_conn = adodbapi.connect(
f"Provider=Microsoft.SQLSERVER.CE.OLEDB.3.5;Data Source={sdf_file};Persist Security Info=False;"
)
sdf_cursor = sdf_conn.cursor()
try:
conn = adodbapi.connect(conn_str)
cursor = conn.cursor()
print("✅ Verbindung erfolgreich.")
print("🔗 Verbinde mit MSSQL...")
mssql_conn = pyodbc.connect(MSSQL_CONNECTION_STR)
mssql_cursor = mssql_conn.cursor()
# Versuche, alle Spaltennamen zu ermitteln
# --- Hilfsfunktion zur Datentyp-Konvertierung ---
def sdf_to_mssql_type(sdf_type, length):
    """Map an SDF (SQL Server CE) column type name to a MSSQL column type.

    *sdf_type* is matched case-insensitively by substring, first match
    wins; *length* is only consulted for character types and falls back
    to 255 when it is missing or non-positive.  Anything unrecognized
    becomes NVARCHAR(MAX).
    """
    name = sdf_type.lower()
    # (keyword substrings, resulting MSSQL type) — scanned in order.
    fixed_mappings = (
        (("int",), "INT"),
        (("float", "double"), "FLOAT"),
        (("decimal", "numeric"), "DECIMAL(18,2)"),
        (("datetime", "date"), "DATETIME"),
        (("bit", "bool"), "BIT"),
        (("image", "binary"), "VARBINARY(MAX)"),
        (("ntext", "text"), "NVARCHAR(MAX)"),
    )
    for keywords, mssql_type in fixed_mappings:
        if any(keyword in name for keyword in keywords):
            return mssql_type
    if "char" in name or "string" in name:
        width = length if length and length > 0 else 255
        return f"NVARCHAR({width})"
    return "NVARCHAR(MAX)"
# --- Tabellen erzeugen ---
for table in tables:
print(f"\n📋 Analysiere Tabelle: {table}")
try:
cursor.execute("SELECT * FROM [Weighing_LDB] WHERE 1=0")
columns = [col[0] for col in cursor.description]
print(f"📋 Gefundene Spalten: {columns}")
except Exception as e:
print(f"⚠️ Fehler beim Lesen der Spalten: {e}")
sdf_cursor.execute(f"SELECT * FROM [{table}] WHERE 1=0")
columns = []
column_names = []
# Versuch, Spalte für Spalte zu lesen
recovered_rows = []
error_columns = []
for col in sdf_cursor.description:
col_name = col[0]
if col_name.lower() == "id":
# Id-Spalte wird immer INT PRIMARY KEY
columns.append("[Id] INT PRIMARY KEY")
else:
col_type = str(col[1])
col_len = col[3]
sql_type = sdf_to_mssql_type(col_type, col_len)
columns.append(f"[{col_name}] {sql_type}")
column_names.append(col_name)
# Prüfen, ob überhaupt eine Id-Spalte existiert
if "Id" not in column_names and "ID" not in column_names:
print("⚠️ Keine Id-Spalte gefunden füge Id INT PRIMARY KEY hinzu.")
columns.insert(0, "[Id] INT PRIMARY KEY")
create_sql = f"CREATE TABLE [{table}] (\n " + ",\n ".join(columns) + "\n)"
print(create_sql)
for col in columns:
try:
cursor.execute(f"SELECT TOP 5 [{col}] FROM [Weighing_LDB]")
rows = cursor.fetchall()
recovered_rows.append((col, [r[0] for r in rows]))
print(f"✅ Spalte {col}: {len(rows)} Werte gelesen")
mssql_cursor.execute(create_sql)
mssql_conn.commit()
print(f"✅ Tabelle [{table}] in MSSQL erstellt.")
except Exception as inner:
print(f"⚠️ Tabelle [{table}] konnte nicht erstellt werden (vielleicht existiert sie bereits): {inner}")
except Exception as e:
print(f"❌ Fehler in Spalte {col}: {e}")
error_columns.append(col)
print(f"❌ Fehler beim Lesen von {table}: {e}")
if recovered_rows:
print(f"\n💾 Schreibe Ergebnis in: {output_csv}")
with open(output_csv, "w", newline="", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(["Spalte", "Beispielwerte"])
for col, vals in recovered_rows:
writer.writerow([col, str(vals[:5])])
print("✅ Teildaten erfolgreich exportiert.")
# --- Aufräumen ---
sdf_cursor.close()
sdf_conn.close()
mssql_cursor.close()
mssql_conn.close()
if error_columns:
print(f"\n⚠️ Problematische Spalten: {error_columns}")
cursor.close()
conn.close()
print("\n🔚 Wiederherstellung abgeschlossen.")
except Exception as e:
print(f"❌ Fehler beim Zugriff auf SDF: {e}")
print("\n🏁 Fertig!")