diff --git a/hash_submissions.py b/hash_submissions.py
deleted file mode 100644
index d0cd986..0000000
--- a/hash_submissions.py
+++ /dev/null
@@ -1,44 +0,0 @@
-### TESTING
-### feature to hash all gradebook submission files, and check for duplicates across all students / submissions
-### not fully implemented yet - only creates hashes and outputs to csv for manual inspection
-
-import os, sys
-from datetime import datetime
-import csv
-import hashlib
-
-
-def hash_files_in_dir(dir_path: str, csv_suffix: str):
-    os.makedirs('csv', exist_ok=True)
-    csv_file_name = f'file_hashes_{csv_suffix}_{datetime.now().strftime("%Y%m%d-%H%M%S")}.csv'
-    csv_file = os.path.join('csv', csv_file_name)
-
-    with open(csv_file, 'w', newline='') as csvfile: # Open the output CSV file for writing
-        fieldnames = ['Student ID', 'file', 'sha256 hash']
-        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
-        writer.writeheader()
-
-        for subdir, dirs, files in os.walk(dir_path): # Loop through all files in the directory and generate hashes
-            for file in files:
-                if 'README.md' not in file:
-                    directories = [d for d in os.path.abspath(subdir).split(os.path.sep)] # list of directories in the file path
-
-                    student_id = directories[directories.index(csv_suffix)+1] # use the index of 'csv_suffix' which is the gradebook name, and get the next directory which is the student id
-                    filepath = os.path.join(subdir, file)
-                    with open(filepath, 'rb') as f:
-                        filehash = hashlib.sha256(f.read()).hexdigest()
-                    writer.writerow({'Student ID': student_id, 'file': filepath, 'sha256 hash': filehash})
-
-
-def main():
-    submissions_dir_name = ' '.join(sys.argv[1:]) if len(sys.argv) > 1 else exit(f'\nNo submissions dir name given. Provide the name as an argument.\n\nUsage: python {sys.argv[0]} [submissions dir name]\n')
-    submissions_dir = os.path.join('BB_submissions', submissions_dir_name) # dir with extracted submissions
-    if os.path.isdir(submissions_dir):
-        hash_files_in_dir(submissions_dir, submissions_dir_name)
-    else:
-        exit(f'Directory {submissions_dir} does not exist.\nMake sure "{submissions_dir_name}" exists in "BB_submissions".')
-
-
-if __name__ == '__main__':
-    main()
-
diff --git a/inspect_submissions.py b/inspect_submissions.py
new file mode 100644
index 0000000..45a55ad
--- /dev/null
+++ b/inspect_submissions.py
@@ -0,0 +1,24 @@
+import os, sys
+import pandas as pd
+from datetime import datetime
+from utils.inspector import hash_submissions, suspicious_by_hash
+
+
+def main():
+    submissions_dir_name = ' '.join(sys.argv[1:]) if len(sys.argv) > 1 else exit(f'\nNo submissions dir name given. Provide the name as an argument.\n\nUsage: python {sys.argv[0]} [submissions dir name]\nExample: python {sys.argv[0]} AssignmentX\n')
+    submissions_dir_path = os.path.join('BB_submissions', submissions_dir_name)
+    if not os.path.isdir(submissions_dir_path):
+        exit(f'Directory {submissions_dir_path} does not exist.\nMake sure "{submissions_dir_name}" exists in "BB_submissions".')
+    else:
+        hashes_csv_file_path = hash_submissions(submissions_dir_path)
+
+        csv = pd.read_csv(hashes_csv_file_path)
+        df = pd.DataFrame(csv) # df with all files and their hashes
+        df_suspicious = suspicious_by_hash(df) # df with all files with duplicate hash, excludes files from the same student id
+        csv_name = f'{submissions_dir_name}_suspicious_{datetime.now().strftime("%Y%m%d-%H%M%S")}.csv'
+        csv_out = os.path.join('csv', csv_name)
+        df_suspicious.to_csv(csv_out, index=False)
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/utils/inspector.py b/utils/inspector.py
new file mode 100644
index 0000000..aa2c0e5
--- /dev/null
+++ b/utils/inspector.py
@@ -0,0 +1,55 @@
+import os
+from datetime import datetime
+import csv
+import hashlib
+import pandas as pd
+
+CSV_DIR = os.path.join(os.getcwd(), 'csv')
+
+def get_hashes_in_dir(dir_path: str) -> list:
+    hash_list = []
+    for subdir, dirs, files in os.walk(dir_path): # Loop through all files in the directory and generate hashes
+        for file in files:
+            filepath = os.path.join(subdir, file)
+            with open(filepath, 'rb') as f:
+                filehash = hashlib.sha256(f.read()).hexdigest()
+            hash_list.append({ 'file': filepath, 'sha256 hash': filehash})
+    return hash_list
+
+
+def hash_submissions(submissions_dir_path: str):
+    os.makedirs(CSV_DIR, exist_ok=True)
+
+    submissions_dir_name = os.path.abspath(submissions_dir_path).split(os.path.sep)[-1]
+    csv_file_name = f'{submissions_dir_name}_file_hashes_{datetime.now().strftime("%Y%m%d-%H%M%S")}.csv'
+    csv_file_path = os.path.join(CSV_DIR, csv_file_name)
+    with open(csv_file_path, 'w', newline='') as csvfile: # Open the output CSV file for writing
+        fieldnames = ['Student ID', 'file', 'sha256 hash']
+        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
+        writer.writeheader()
+
+        for student_dir_name in os.listdir(submissions_dir_path):
+            student_dir_path = os.path.join(submissions_dir_path, student_dir_name)
+            hashes_dict = get_hashes_in_dir(student_dir_path)
+            for d in hashes_dict:
+                d.update({'Student ID': student_dir_name}) # update hash records with student id
+            writer.writerows(hashes_dict)
+    return csv_file_path
+
+def get_suspicious_hashes(df: pd.DataFrame) -> list:
+    drop_columns = ['file']
+    df = df.drop(columns=drop_columns).sort_values('sha256 hash') # drop unneeded columns & sort by hash
+    duplicate_hash = df.loc[df.duplicated(subset=['sha256 hash'], keep=False), :] # all files with duplicate hash - incl. files from the same student id
+
+    hash_with_multiple_student_ids = duplicate_hash.groupby('sha256 hash').agg(lambda x: len(x.unique())>1) # true if more than 1 unique student ids (= multiple student ids with same hash), false if unique (= same student id re-submitting with the same hash)
+
+    suspicious_hashes_list = hash_with_multiple_student_ids[hash_with_multiple_student_ids['Student ID']==True].index.to_list() # list with duplicate hashes - only if different student id (doesn't include attempts from same student id)
+    return suspicious_hashes_list
+
+
+def suspicious_by_hash(df: pd.DataFrame) -> pd.DataFrame:
+    suspicious_hashes_list = get_suspicious_hashes(df)
+
+    files_with_suspicious_hash = df[df['sha256 hash'].isin(suspicious_hashes_list)] # excluding duplicate from same student id
+    return files_with_suspicious_hash.sort_values(['sha256 hash', 'Student ID'])
+
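
The cross-student duplicate detection added in utils/inspector.py can be sanity-checked without a real gradebook export. The snippet below is a minimal sketch under assumed data, not part of the diff: the student IDs, file paths, and hash values are invented, and only the column names match what hash_submissions writes to its CSV.

import pandas as pd
from utils.inspector import suspicious_by_hash

# Toy input mimicking the hashes CSV (all values below are made up)
df = pd.DataFrame([
    {'Student ID': 's001', 'file': 's001/report.pdf',   'sha256 hash': 'aaa'},  # same hash as s002, different student: flagged
    {'Student ID': 's002', 'file': 's002/report.pdf',   'sha256 hash': 'aaa'},
    {'Student ID': 's003', 'file': 's003/attempt_1.py', 'sha256 hash': 'bbb'},  # same student re-submitting: not flagged
    {'Student ID': 's003', 'file': 's003/attempt_2.py', 'sha256 hash': 'bbb'},
    {'Student ID': 's004', 'file': 's004/notes.txt',    'sha256 hash': 'ccc'},  # unique hash: not flagged
])

print(suspicious_by_hash(df))
# Expected: only the two 'aaa' rows (s001 and s002), sorted by hash and Student ID,
# since 'bbb' is duplicated only within one student and 'ccc' is unique.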