ggs production data
@@ -1,5 +1,8 @@
import csv
from Levenshtein import distance
import pandas as pd
import uuid


# TODO filter for the columns, a class may be needed for this

@@ -42,21 +45,87 @@ def find_similar_pairs(pair, other_data):
    return similar_pairs


def create_uuid():
    return str(uuid.uuid4())


def add_hl_tag(row):
    return 'HL0707104-' + row['klasse']


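# Builds the pupil ("Schüler") import list:
#   - drop pupils from the current export (path) that only appear in the old list (old_pairs),
#   - refresh the class column for existing pupils from the new export and prefix it with the HL tag,
#   - append pupils that are completely new, giving each a fresh UUID and the default quotas,
#   - merge both parts and write the result to ../GGS/outputSchueler.csv.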
def create_import_list(path, path_new, old_pairs, new_pairs, common_pairs):
    df = pd.read_csv(path, sep=';', encoding='utf-8')
    df = df[~df[['name', 'vorname']].apply(tuple, axis=1).isin(old_pairs)]
    df = df.drop('username', axis=1, errors='ignore')
    df = df.drop('klasse', axis=1, errors='ignore')
    # print(len(df))
    # print(df)
    new_data = pd.read_csv(path_new, sep=';', encoding='utf-8')

    # For pupils: the old classes were deleted above, refill them with the new classes from new_data

    matches = new_data[~new_data[['name', 'vorname']].apply(tuple, axis=1).isin(new_pairs)]
    matches.loc[:, 'klasse'] = matches.apply(add_hl_tag, axis=1)
    # print(len(matches))
    # print(matches)
    df = pd.merge(df, matches, how='outer', left_on=['name', 'vorname'], right_on=['name', 'vorname'])
    df = df[['name', 'vorname', 'klasse', 'schuelerid', 'mailUserQuota', 'oxUserQuota', 'oxContext']]
    # print(df)
    print(len(df))

    new_data = new_data[~new_data[['name', 'vorname']].apply(tuple, axis=1).isin(common_pairs)]
    # new_data = new_data.drop('Unnamed: 2', axis=1, errors='ignore')
    new_uuids = []
    for row in range(len(new_data)):
        new_uuids.append(create_uuid())
    # klasse?? difference between teachers and pupils
    # new_data['klasse'] = None
    new_data.loc[:, 'klasse'] = new_data.apply(add_hl_tag, axis=1)
    new_data.insert(loc=2, column='schuelerid', value=new_uuids)
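    # Default values applied to every newly created pupil account; the quota
    # values are presumably in MB and oxContext the Open-Xchange context id.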
    mailUserQuota = 1024
    oxUserQuota = 5120
    oxContext = 16
    new_data['mailUserQuota'] = mailUserQuota
    new_data['oxUserQuota'] = oxUserQuota
    new_data['oxContext'] = oxContext
    # print(new_data)
    print(len(new_data))

    # complete the data before the merge
    # import_df = pd.merge(df, new_data, how='outer', left_on=['name', 'vorname', 'mailUserQuota', 'oxUserQuota', 'oxContext'], right_on=['name', 'vorname', 'mailUserQuota', 'oxUserQuota', 'oxContext'])
    import_df = pd.merge(df, new_data, how='outer', left_on=['name', 'vorname', 'klasse', 'schuelerid', 'mailUserQuota', 'oxUserQuota', 'oxContext'], right_on=['name', 'vorname', 'klasse', 'schuelerid', 'mailUserQuota', 'oxUserQuota', 'oxContext'])
    # pd.set_option('display.max_rows', None)
    # pd.set_option('display.max_columns', None)
    # print(import_df)
    # print(len(import_df))
    # pd.reset_option('display.max_rows')
    # pd.reset_option('display.max_columns')
    out_path = '../GGS/outputSchueler.csv'
    import_df.to_csv(out_path, index=False)
    print(len(import_df))
    print('Schüler Import Liste erzeugt')
    print('Testuser manuell nachtragen!!')


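# Compare the new and the old GGS pupil exports, report the differences,
# then build the import list from the downloaded current data.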
def main():
    file1_path = '../GGS/ggsSnew.csv'  # path to the first CSV file
    file2_path = '../GGS/ggsSold2.cvs'  # path to the second CSV file
    file3_path = '../GGS/downloadS.csv'

    common_pairs, unique_pairs1, unique_pairs2, data1, data2 = compare_csv(file1_path, file2_path)
    common_pairs, new_pairs, old_pairs, data1, data2 = compare_csv(file1_path, file2_path)

    print(f"Anzahl der übereinstimmenden Paare: {len(common_pairs)}")
    print(f"Anzahl der neuen Einträge: {len(unique_pairs1)}")
    print(f"Anzahl der veralteten Einträge: {len(unique_pairs2)}")
    print(f"Anzahl der neuen Einträge: {len(new_pairs)}")
    print(f"Anzahl der veralteten Einträge: {len(old_pairs)}")

    for pair in data1:
        similar_pairs_list2 = find_similar_pairs(pair, set(data2) - {pair})
    # for pair in data1:
    #     similar_pairs_list2 = find_similar_pairs(pair, set(data2) - {pair})
    #
    #     if similar_pairs_list2:
    #         print(f"Ähnliche Paare zu {pair} in Liste 2: {similar_pairs_list2}")

        if similar_pairs_list2:
            print(f"Ähnliche Paare zu {pair} in Liste 2: {similar_pairs_list2}")
    create_import_list(file3_path, file1_path, old_pairs, new_pairs, common_pairs)


if __name__ == "__main__":