Currently broken

This commit is contained in:
2025-06-22 16:11:29 -05:00
parent e7a0f5b1be
commit 2bb7a29141
77 changed files with 1748 additions and 2298 deletions

View File

@ -1,33 +1,45 @@
# plugins/utility/routes.py
# Standard library
import csv
import io
import uuid
import difflib
import os
import re
import uuid
import zipfile
import tempfile
import difflib
from datetime import datetime
# Thirdparty
from flask import (
Blueprint, request, render_template, redirect, flash,
session, url_for, send_file, current_app
)
from werkzeug.utils import secure_filename
from flask_login import login_required, current_user
from flask_wtf.csrf import generate_csrf
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
import qrcode
from PIL import Image, ImageDraw, ImageFont
from qrcode.image.pil import PilImage
from qrcode.constants import ERROR_CORRECT_H
# Application
from app import db
from app.neo4j_utils import get_neo4j_handler
# Plugins
from plugins.plant.models import (
db,
Plant,
PlantCommonName,
PlantScientificName,
PlantOwnershipLog,
)
from plugins.media.models import Media
from plugins.utility.models import ImportBatch # tracks which exports have been imported
from plugins.media.routes import _process_upload_file
from plugins.utility.models import ImportBatch
bp = Blueprint(
'utility',
@ -36,12 +48,13 @@ bp = Blueprint(
url_prefix='/utility'
)
@bp.route("/", methods=["GET"])
@login_required
def index():
    """Entry point for the utility plugin: forward visitors to the upload page."""
    # /utility/ has no content of its own — everything starts at /utility/upload.
    return redirect(url_for("utility.upload"))
# ────────────────────────────────────────────────────────────────────────────────
# Required headers for your sub-app export ZIP
PLANT_HEADERS = [
@ -55,6 +68,7 @@ MEDIA_HEADERS = [
# Headers for standalone CSV review flow
REQUIRED_HEADERS = {"uuid", "plant_type", "name", "scientific_name", "mother_uuid"}
@bp.route("/upload", methods=["GET", "POST"])
@login_required
def upload():
@ -68,12 +82,10 @@ def upload():
# ── ZIP Import Flow ───────────────────────────────────────────────────
if filename.endswith(".zip"):
# 1) Save upload to disk
tmp_zip = tempfile.NamedTemporaryFile(delete=False, suffix=".zip")
file.save(tmp_zip.name)
tmp_zip.close()
# 2) Open as ZIP
try:
z = zipfile.ZipFile(tmp_zip.name)
except zipfile.BadZipFile:
@ -81,14 +93,12 @@ def upload():
flash("Uploaded file is not a valid ZIP.", "danger")
return redirect(request.url)
# 3) Ensure both CSVs
names = z.namelist()
if "plants.csv" not in names or "media.csv" not in names:
os.remove(tmp_zip.name)
flash("ZIP must contain both plants.csv and media.csv", "danger")
return redirect(request.url)
# 4) Read export_id from metadata.txt
export_id = None
if "metadata.txt" in names:
meta = z.read("metadata.txt").decode("utf-8", "ignore")
@ -101,13 +111,11 @@ def upload():
flash("metadata.txt missing or missing export_id", "danger")
return redirect(request.url)
# 5) Skip if already imported
if ImportBatch.query.filter_by(export_id=export_id, user_id=current_user.id).first():
os.remove(tmp_zip.name)
flash("This export has already been imported.", "info")
return redirect(request.url)
# 6) Record import batch
batch = ImportBatch(
export_id=export_id,
user_id=current_user.id,
@ -116,37 +124,35 @@ def upload():
db.session.add(batch)
db.session.commit()
# 7) Extract into temp dir
tmpdir = tempfile.mkdtemp()
z.extractall(tmpdir)
# 8) Validate plants.csv
plant_path = os.path.join(tmpdir, "plants.csv")
with open(plant_path, newline="", encoding="utf-8-sig") as pf:
reader = csv.DictReader(pf)
if reader.fieldnames != PLANT_HEADERS:
missing = set(PLANT_HEADERS) - set(reader.fieldnames or [])
extra = set(reader.fieldnames or []) - set(PLANT_HEADERS)
extra = set(reader.fieldnames or []) - set(PLANT_HEADERS)
os.remove(tmp_zip.name)
flash(f"plants.csv header mismatch. Missing: {missing}, Extra: {extra}", "danger")
return redirect(request.url)
plant_rows = list(reader)
# 9) Validate media.csv
media_path = os.path.join(tmpdir, "media.csv")
with open(media_path, newline="", encoding="utf-8-sig") as mf:
mreader = csv.DictReader(mf)
if mreader.fieldnames != MEDIA_HEADERS:
missing = set(MEDIA_HEADERS) - set(mreader.fieldnames or [])
extra = set(mreader.fieldnames or []) - set(MEDIA_HEADERS)
extra = set(mreader.fieldnames or []) - set(MEDIA_HEADERS)
os.remove(tmp_zip.name)
flash(f"media.csv header mismatch. Missing: {missing}, Extra: {extra}", "danger")
return redirect(request.url)
media_rows = list(mreader)
# 10) Import plants + Neo4j
neo = get_neo4j_handler()
added_plants = 0
plant_map = {}
for row in plant_rows:
common = PlantCommonName.query.filter_by(name=row["Name"]).first()
if not common:
@ -173,6 +179,7 @@ def upload():
)
db.session.add(p)
db.session.flush()
plant_map[p.uuid] = p.id
log = PlantOwnershipLog(
plant_id=p.id,
@ -189,42 +196,39 @@ def upload():
added_plants += 1
# 11) Import media files (by Plant UUID)
# Import media once for the full batch
added_media = 0
for mrow in media_rows:
plant_uuid = mrow["Plant UUID"]
plant_obj = Plant.query.filter_by(uuid=plant_uuid).first()
if not plant_obj:
plant_id = plant_map.get(plant_uuid)
if not plant_id:
continue
subpath = mrow["Image Path"].split('uploads/', 1)[1]
subpath = mrow["Image Path"].split('uploads/', 1)[-1]
src = os.path.join(tmpdir, "images", subpath)
if not os.path.isfile(src):
continue
dest_dir = os.path.join(
current_app.static_folder, "uploads",
str(current_user.id), str(plant_obj.id)
)
os.makedirs(dest_dir, exist_ok=True)
try:
with open(src, "rb") as f:
file_storage = FileStorage(
stream=io.BytesIO(f.read()),
filename=os.path.basename(subpath),
content_type='image/jpeg'
)
media = _process_upload_file(
file=file_storage,
uploader_id=current_user.id,
plugin="plant",
related_id=plant_id
)
media.uploaded_at = datetime.fromisoformat(mrow["Uploaded At"])
media.caption = mrow["Source Type"]
db.session.add(media)
added_media += 1
except Exception as e:
current_app.logger.warning(f"Failed to import media file: {subpath}{e}")
ext = os.path.splitext(src)[1]
fname = f"{uuid.uuid4().hex}{ext}"
dst = os.path.join(dest_dir, fname)
with open(src, "rb") as sf, open(dst, "wb") as df:
df.write(sf.read())
media = Media(
file_url=f"uploads/{current_user.id}/{plant_obj.id}/{fname}",
uploaded_at=datetime.fromisoformat(mrow["Uploaded At"]),
uploader_id=current_user.id,
caption=mrow["Source Type"],
plant_id=plant_obj.id
)
db.session.add(media)
added_media += 1
# 12) Finalize & cleanup
db.session.commit()
neo.close()
os.remove(tmp_zip.name)
@ -251,17 +255,17 @@ def upload():
review_list = []
all_common = {c.name.lower(): c for c in PlantCommonName.query.all()}
all_sci = {s.name.lower(): s for s in PlantScientificName.query.all()}
all_sci = {s.name.lower(): s for s in PlantScientificName.query.all()}
for row in reader:
uuid_raw = row.get("uuid", "")
uuid_val = uuid_raw.strip().strip('"')
name_raw = row.get("name", "")
name = name_raw.strip()
sci_raw = row.get("scientific_name", "")
sci_name = sci_raw.strip()
plant_type = row.get("plant_type", "").strip() or "plant"
mother_raw = row.get("mother_uuid", "")
uuid_raw = row.get("uuid", "")
uuid_val = uuid_raw.strip().strip('"')
name_raw = row.get("name", "")
name = name_raw.strip()
sci_raw = row.get("scientific_name", "")
sci_name = sci_raw.strip()
plant_type = row.get("plant_type", "").strip() or "plant"
mother_raw = row.get("mother_uuid", "")
mother_uuid = mother_raw.strip().strip('"')
if not (uuid_val and name and plant_type):
@ -277,11 +281,11 @@ def upload():
else None)
item = {
"uuid": uuid_val,
"name": name,
"sci_name": sci_name,
"suggested": suggested,
"plant_type": plant_type,
"uuid": uuid_val,
"name": name,
"sci_name": sci_name,
"suggested": suggested,
"plant_type": plant_type,
"mother_uuid": mother_uuid
}
review_list.append(item)
@ -293,7 +297,6 @@ def upload():
flash("Unsupported file type. Please upload a ZIP or CSV.", "danger")
return redirect(request.url)
# GET → render the upload form
return render_template("utility/upload.html", csrf_token=generate_csrf())
@ -304,8 +307,8 @@ def review():
review_list = session.get("review_list", [])
if request.method == "POST":
neo = get_neo4j_handler()
added = 0
neo = get_neo4j_handler()
added = 0
all_common = {c.name.lower(): c for c in PlantCommonName.query.all()}
all_scientific = {s.name.lower(): s for s in PlantScientificName.query.all()}
@ -416,12 +419,10 @@ def export_data():
plants_csv = plant_io.getvalue()
# 2) Gather media
media_records = (
Media.query
.filter_by(uploader_id=current_user.id)
.order_by(Media.id)
.all()
)
media_records = (Media.query
.filter_by(uploader_id=current_user.id)
.order_by(Media.id)
.all())
# Build media.csv
media_io = io.StringIO()
mw = csv.writer(media_io)
@ -440,22 +441,17 @@ def export_data():
# 3) Assemble ZIP with images from UPLOAD_FOLDER
zip_buf = io.BytesIO()
with zipfile.ZipFile(zip_buf, 'w', zipfile.ZIP_DEFLATED) as zf:
# metadata.txt
meta = (
f"export_id,{export_id}\n"
f"user_id,{current_user.id}\n"
f"exported_at,{datetime.utcnow().isoformat()}\n"
)
zf.writestr('metadata.txt', meta)
# CSV files
zf.writestr('plants.csv', plants_csv)
zf.writestr('media.csv', media_csv)
# real image files under images/
media_root = current_app.config['UPLOAD_FOLDER']
for m in media_records:
# file_url is “uploads/...”
rel = m.file_url.split('uploads/', 1)[-1]
abs_path = os.path.join(media_root, rel)
if os.path.isfile(abs_path):
@ -463,8 +459,6 @@ def export_data():
zf.write(abs_path, arcname)
zip_buf.seek(0)
# Safe filename
safe_email = re.sub(r'\W+', '_', current_user.email)
filename = f"{safe_email}_export_{export_id}.zip"
@ -474,3 +468,79 @@ def export_data():
as_attachment=True,
download_name=filename
)
# ────────────────────────────────────────────────────────────────────────────────
# QR-Code Generation Helpers & Routes
# ────────────────────────────────────────────────────────────────────────────────
def generate_label_with_name(qr_url, name, download_filename):
    """
    Build a 1.5"x1.5" PNG label (300 dpi) with a QR code and the plant
    name rendered underneath, and return it as a downloadable response.

    Args:
        qr_url: URL to encode into the QR code.
        name: Plant name drawn below the code (may be None or empty).
        download_filename: Filename offered to the browser for the download.

    Returns:
        A Flask ``send_file`` response carrying the PNG as an attachment.
    """
    qr = qrcode.QRCode(version=2, error_correction=ERROR_CORRECT_H, box_size=10, border=1)
    qr.add_data(qr_url)
    qr.make(fit=True)
    qr_img = qr.make_image(image_factory=PilImage, fill_color="black", back_color="white").convert("RGB")

    dpi = 300
    label_px = int(1.5 * dpi)      # 450 px square at 300 dpi
    canvas_h = label_px + 400      # extra vertical space for the caption
    label_img = Image.new("RGB", (label_px, canvas_h), "white")
    label_img.paste(qr_img.resize((label_px, label_px)), (0, 0))

    font_path = os.path.join(current_app.root_path, '..', 'font', 'ARIALLGT.TTF')
    draw = ImageDraw.Draw(label_img)
    text = (name or '').strip()

    # Shrink the font until the whole name fits, or we reach the minimum size.
    font_size = 28
    while font_size > 10:
        try:
            font = ImageFont.truetype(font_path, font_size)
        except OSError:
            # Bundled font missing/unreadable — fall back to PIL's default.
            font = ImageFont.load_default()
        if draw.textlength(text, font=font) <= label_px - 20:
            break
        font_size -= 1

    # Still too wide at minimum size: truncate and mark with an ellipsis.
    # BUG FIX: the original appended an empty string ("") after trimming, so
    # truncation was invisible; we now append "…" and account for its width
    # while trimming so the final string (with the marker) actually fits.
    if draw.textlength(text, font=font) > label_px - 20:
        while draw.textlength(text + "…", font=font) > label_px - 20 and len(text) > 1:
            text = text[:-1]
        text += "…"

    x = (label_px - draw.textlength(text, font=font)) // 2
    y = label_px + 20
    draw.text((x, y), text, font=font, fill="black")

    buf = io.BytesIO()
    label_img.save(buf, format='PNG', dpi=(dpi, dpi))
    buf.seek(0)
    return send_file(buf, mimetype='image/png', download_name=download_filename, as_attachment=True)
@bp.route('/<uuid:uuid_val>/download_qr', methods=['GET'])
def download_qr(uuid_val):
    """Return a QR-code label PNG for the plant identified by *uuid_val*."""
    plant = Plant.query.filter_by(uuid=uuid_val).first_or_404()
    # Lazily assign a short id the first time a QR code is requested.
    if not plant.short_id:
        plant.short_id = Plant.generate_short_id()
        db.session.commit()
    target_url = f'https://plant.cards/{plant.short_id}'
    return generate_label_with_name(
        target_url,
        plant.common_name.name or plant.scientific_name,
        f"{plant.short_id}.png"
    )
@bp.route('/<uuid:uuid_val>/download_qr_card', methods=['GET'])
def download_qr_card(uuid_val):
    """Return a QR-code card PNG for the plant identified by *uuid_val*."""
    plant = Plant.query.filter_by(uuid=uuid_val).first_or_404()
    # Generate and persist a short id on first request, matching download_qr.
    if not plant.short_id:
        plant.short_id = Plant.generate_short_id()
        db.session.commit()
    target_url = f'https://plant.cards/{plant.short_id}'
    return generate_label_with_name(
        target_url,
        plant.common_name.name or plant.scientific_name,
        f"{plant.short_id}_card.png"
    )