mirror of https://github.com/mealie-recipes/mealie.git

Commit: nextcloud migration
@@ -52,7 +52,7 @@ def read_chowdown_file(recipe_file: Path) -> Recipe:
             elif x == 1:
                 recipe_description = str(item)
 
-    except yaml.YAMLError as exc:
+    except yaml.YAMLError:
         return
 
     reformat_data = {
mealie/services/migrations/nextcloud.py (new file, 89 lines)
@@ -0,0 +1,89 @@
import json
import logging
import shutil
import zipfile
from pathlib import Path

from services.recipe_services import IMG_DIR, Recipe
from services.scrape_services import normalize_data, process_recipe_data

CWD = Path(__file__).parent
TEMP_DIR = CWD.parent.parent.joinpath("data", "temp")
MIGRATION_DIR = CWD.parent.parent.joinpath("data", "migration")


def process_selection(selection: Path) -> Path:
    # Accept either an already-extracted directory or a Nextcloud zip
    # export; anything else is unsupported.
    if selection.is_dir():
        return selection
    elif selection.suffix == ".zip":
        with zipfile.ZipFile(selection, "r") as zip_ref:
            nextcloud_dir = TEMP_DIR.joinpath("nextcloud")
            nextcloud_dir.mkdir(exist_ok=False, parents=True)
            zip_ref.extractall(nextcloud_dir)
            return nextcloud_dir
    else:
        return None


def import_recipes(recipe_dir: Path) -> Recipe:
    # Each exported recipe folder holds a recipe.json plus an optional
    # "full.*" image file.
    image = False
    for file in recipe_dir.glob("full.*"):
        image = file

    recipe_file = recipe_dir.joinpath("recipe.json")

    with open(recipe_file, "r") as f:
        recipe_dict = json.loads(f.read())

    recipe_dict = process_recipe_data(recipe_dict)
    recipe_data = normalize_data(recipe_dict)

    image_name = None
    if image:
        image_name = recipe_data["slug"] + image.suffix
        recipe_data["image"] = image_name
    else:
        recipe_data["image"] = "none"

    recipe = Recipe(**recipe_data)

    if image:
        shutil.copy(image, IMG_DIR.joinpath(image_name))

    return recipe


def prep():
    # Start from a clean temp directory; a missing directory is fine.
    try:
        shutil.rmtree(TEMP_DIR)
    except FileNotFoundError:
        pass
    TEMP_DIR.mkdir(exist_ok=True, parents=True)


def cleanup():
    shutil.rmtree(TEMP_DIR)


def migrate(selection: str):
    prep()
    MIGRATION_DIR.mkdir(exist_ok=True)
    selection = MIGRATION_DIR.joinpath(selection)

    nextcloud_dir = process_selection(selection)

    successful_imports = []
    failed_imports = []
    for dir in nextcloud_dir.iterdir():
        if dir.is_dir():
            try:
                recipe = import_recipes(dir)
                recipe.save_to_db()
                successful_imports.append(recipe.name)
            except Exception:
                logging.error(f"Failed Nextcloud Import: {dir.name}")
                failed_imports.append(dir.name)

    cleanup()

    return {"successful": successful_imports, "failed": failed_imports}
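Taken together, migrate() is the module's entry point: given a filename under data/migration that is either a Nextcloud export zip or an already-extracted directory, it imports every recipe folder it finds and reports the outcome. A minimal sketch of a caller, assuming a hypothetical export file already placed in the migration directory:

    from services.migrations.nextcloud import migrate

    # "nextcloud_export.zip" is an assumed example name; it must already
    # exist under data/migration for process_selection() to pick it up.
    report = migrate("nextcloud_export.zip")
    print("imported:", report["successful"])
    print("failed:", report["failed"])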
mealie/services/scrape_services.py
@@ -1,7 +1,6 @@
-from typing import List
-
 import json
 from pathlib import Path
+from typing import List
 
 from scrape_schema_recipe import scrape_url
 from slugify import slugify
@@ -18,7 +17,7 @@ def normalize_image_url(image) -> str:
     if type(image) == list:
         return image[0]
     elif type(image) == dict:
-        return image['url']
+        return image["url"]
     elif type(image) == str:
         return image
     else:
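For context, normalize_image_url exists because scraped schema.org data represents the image field as a list of URLs, an ImageObject dict, or a bare string; the hunk above only normalizes quote style. Illustrative calls (URLs invented for the example):

    normalize_image_url(["https://example.com/a.jpg", "https://example.com/b.jpg"])
    # -> "https://example.com/a.jpg"  (first entry wins)

    normalize_image_url({"@type": "ImageObject", "url": "https://example.com/a.jpg"})
    # -> "https://example.com/a.jpg"

    normalize_image_url("https://example.com/a.jpg")
    # -> returned unchanged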
@@ -28,7 +27,9 @@ def normalize_image_url(image) -> str:
 def normalize_instructions(instructions) -> List[dict]:
     # One long string split by (possibly multiple) new lines
     if type(instructions) == str:
-        return [{"text": line.strip()} for line in filter(None, instructions.splitlines())]
+        return [
+            {"text": line.strip()} for line in filter(None, instructions.splitlines())
+        ]
 
     # Plain strings in a list
     elif type(instructions) == list and type(instructions[0]) == str:
@@ -36,7 +37,11 @@ def normalize_instructions(instructions) -> List[dict]:
 
     # Dictionaries (let's assume it's a HowToStep) in a list
     elif type(instructions) == list and type(instructions[0]) == dict:
-        return [{"text": step['text'].strip()} for step in instructions if step['@type'] == 'HowToStep']
+        return [
+            {"text": step["text"].strip()}
+            for step in instructions
+            if step["@type"] == "HowToStep"
+        ]
 
     else:
         raise Exception(f"Unrecognised instruction format: {instructions}")
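The net effect of normalize_instructions is easiest to see on the two shapes the hunks above reformat; the inputs here are invented for illustration:

    normalize_instructions("Chop the onions.\n\nSimmer 10 minutes.")
    # -> [{"text": "Chop the onions."}, {"text": "Simmer 10 minutes."}]
    #    (blank lines are dropped by filter(None, ...))

    normalize_instructions(
        [{"@type": "HowToStep", "text": " Chop the onions. "},
         {"@type": "HowToSection", "text": "Prep"}]
    )
    # -> [{"text": "Chop the onions."}]
    #    (entries that are not HowToStep are filtered out)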
@@ -51,7 +56,9 @@ def normalize_yield(yld) -> str:
 
 
 def normalize_data(recipe_data: dict) -> dict:
     recipe_data["recipeYield"] = normalize_yield(recipe_data.get("recipeYield"))
-    recipe_data["recipeInstructions"] = normalize_instructions(recipe_data["recipeInstructions"])
+    recipe_data["recipeInstructions"] = normalize_instructions(
+        recipe_data["recipeInstructions"]
+    )
     return recipe_data
 
@@ -67,13 +74,7 @@ def create_from_url(url: str) -> dict:
     return recipe.save_to_db()
 
 
-def process_recipe_url(url: str) -> dict:
-    new_recipe: dict = scrape_url(url, python_objects=True)[0]
-    logger.info(f"Recipe Scraped From Web: {new_recipe}")
-
-    if not new_recipe:
-        return "fail"  # TODO: Return Better Error Here
-
+def process_recipe_data(new_recipe: dict, url=None) -> dict:
     slug = slugify(new_recipe["name"])
     mealie_tags = {
         "slug": slug,
@@ -87,8 +88,22 @@ def process_recipe_url(url: str) -> dict:
 
     new_recipe.update(mealie_tags)
 
+    return new_recipe
+
+
+def process_recipe_url(url: str) -> dict:
+    new_recipe: dict = scrape_url(url, python_objects=True)[0]
+    logger.info(f"Recipe Scraped From Web: {new_recipe}")
+
+    if not new_recipe:
+        return "fail"  # TODO: Return Better Error Here
+
+    new_recipe = process_recipe_data(new_recipe, url)
+
     try:
-        img_path = scrape_image(normalize_image_url(new_recipe.get("image")), slug)
+        img_path = scrape_image(
+            normalize_image_url(new_recipe.get("image")), new_recipe.get("slug")
+        )
         new_recipe["image"] = img_path.name
     except:
         new_recipe["image"] = None
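The refactor above is what makes the Nextcloud migration possible: scraping (process_recipe_url) and schema.org post-processing (process_recipe_data) are now separate steps, so nextcloud.py can run the post-processing on recipe.json files it already has on disk. A rough sketch of the two entry paths, with an invented URL and file path:

    import json

    # Web path: fetch the page, then slug/tag the scraped result.
    recipe_dict = process_recipe_url("https://example.com/some-recipe")

    # Migration path: the Nextcloud export already contains schema.org JSON,
    # so skip the fetch and call process_recipe_data directly.
    with open("recipe.json", "r") as f:
        recipe_dict = process_recipe_data(json.loads(f.read()))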