"""
BIGQUERY ANALYSIS LAYER - FOOD SECURITY AGGREGATION
All aggregations use norm_value from _get_norm_value_df().
FIXED: only four tables are written to fs_asean_gold (layer='gold'):
- agg_pillar_composite
- agg_pillar_by_country
- agg_framework_by_country
- agg_framework_asean
"""
import pandas as pd
import numpy as np
from datetime import datetime
import logging
import json
import sys as _sys
from scripts.bigquery_config import get_bigquery_client
from scripts.bigquery_helpers import (
log_update,
load_to_bigquery,
read_from_bigquery,
setup_logging,
save_etl_metadata,
)
from google.cloud import bigquery
from sklearn.preprocessing import MinMaxScaler
# =============================================================================
# GLOBAL CONSTANTS
# =============================================================================
DIRECTION_INVERT_KEYWORDS = frozenset({
"negative", "lower_better", "lower_is_better", "inverse", "neg",
})
DIRECTION_POSITIVE_KEYWORDS = frozenset({
"positive", "higher_better", "higher_is_better",
})
# When True, MDGs/SDGs scores in the mixed era are min-max scaled jointly on one
# shared 1-100 scale instead of separately per framework.
NORMALIZE_FRAMEWORKS_JOINTLY = False
# =============================================================================
# Windows CP1252-safe logging
# =============================================================================
class _SafeStreamHandler(logging.StreamHandler):
def emit(self, record):
try:
super().emit(record)
except UnicodeEncodeError:
try:
msg = self.format(record)
self.stream.write(
msg.encode("utf-8", errors="replace").decode("ascii", errors="replace")
+ self.terminator
)
self.flush()
except Exception:
self.handleError(record)
# =============================================================================
# HELPERS
# =============================================================================
def _should_invert(direction: str, logger=None, context: str = "") -> bool:
d = str(direction).lower().strip()
if d in DIRECTION_INVERT_KEYWORDS:
return True
if d in DIRECTION_POSITIVE_KEYWORDS:
return False
if logger:
logger.warning(
f" [DIRECTION WARNING] Unknown direction '{direction}' "
f"{'(' + context + ')' if context else ''}. Defaulting to positive (no invert)."
)
return False
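# Illustration of the direction lookup ("weird_label" is a hypothetical value):
#   _should_invert("lower_better") -> True   (lower raw values are better -> invert)
#   _should_invert("positive")     -> False
#   _should_invert("weird_label")  -> False, after logging a [DIRECTION WARNING]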
def global_minmax(series: pd.Series, lo: float = 1.0, hi: float = 100.0) -> pd.Series:
values = series.dropna().values
if len(values) == 0:
return pd.Series(np.nan, index=series.index)
v_min, v_max = values.min(), values.max()
if v_min == v_max:
return pd.Series((lo + hi) / 2.0, index=series.index)
scaler = MinMaxScaler(feature_range=(lo, hi))
result = np.full(len(series), np.nan)
not_nan = series.notna()
result[not_nan.values] = scaler.fit_transform(
series[not_nan].values.reshape(-1, 1)
).flatten()
return pd.Series(result, index=series.index)
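# Sketch of the rescaling behaviour (input values are illustrative only):
#   global_minmax(pd.Series([0.2, 0.5, 0.8])) -> [1.0, 50.5, 100.0]
#   global_minmax(pd.Series([0.4, 0.4]))      -> [50.5, 50.5]  (constant -> midpoint)
#   NaN entries stay NaN and are excluded from the scaler fit.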
def add_yoy(df: pd.DataFrame, group_cols: list, score_col: str) -> pd.DataFrame:
df = df.sort_values(group_cols + ["year"]).reset_index(drop=True)
if group_cols:
df["year_over_year_change"] = df.groupby(group_cols)[score_col].diff()
else:
df["year_over_year_change"] = df[score_col].diff()
return df
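# Example with hypothetical scores: within one group, values 40, 45, 43 over
# 2015-2017 give year_over_year_change NaN, 5, -2 (the first year has no prior).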
def safe_int(series: pd.Series, fill: int = 0, col_name: str = "", logger=None) -> pd.Series:
n_nan = series.isna().sum()
if n_nan > 0 and logger:
        logger.warning(
            f" [NaN WARNING] Column '{col_name}' has {n_nan} NaN values -> filled with {fill}"
        )
return series.fillna(fill).astype(int)
def check_and_dedup(df: pd.DataFrame, key_cols: list, context: str = "", logger=None) -> pd.DataFrame:
dupes = df.duplicated(subset=key_cols, keep=False)
if dupes.any():
n_dupes = dupes.sum()
if logger:
            logger.warning(
                f" [DEDUP WARNING] {context}: {n_dupes} duplicate rows on {key_cols}. "
                f"Aggregating with mean."
            )
numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
agg_dict = {
c: ("mean" if c in numeric_cols else "first")
for c in df.columns if c not in key_cols
}
df = df.groupby(key_cols, as_index=False).agg(agg_dict)
return df
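# Example with made-up rows: two records sharing key (country_id=1, year=2020)
# with scores 40 and 60 collapse to one row with score 50 (numeric mean), while
# non-numeric columns keep their first value.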
# =============================================================================
# MAIN CLASS
# =============================================================================
class FoodSecurityAggregator:
def __init__(self, client: bigquery.Client):
self.client = client
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.propagate = False
self.load_metadata = {
"agg_pillar_composite": {"rows_loaded": 0, "status": "pending", "start_time": None, "end_time": None},
"agg_pillar_by_country": {"rows_loaded": 0, "status": "pending", "start_time": None, "end_time": None},
"agg_framework_by_country": {"rows_loaded": 0, "status": "pending", "start_time": None, "end_time": None},
"agg_framework_asean": {"rows_loaded": 0, "status": "pending", "start_time": None, "end_time": None},
}
self.df = None
self.dims = {}
self.sdgs_start_year = None
self.mdgs_indicator_ids = set()
self.sdgs_indicator_ids = set()
# =========================================================================
# STEP 1: Load data dari Gold layer
# =========================================================================
def load_data(self):
self.logger.info("=" * 70)
self.logger.info("STEP 1: LOAD DATA from fs_asean_gold")
self.logger.info("=" * 70)
self.df = read_from_bigquery(self.client, "analytical_food_security", layer='gold')
self.logger.info(f" analytical_food_security : {len(self.df):,} rows")
self.dims["country"] = read_from_bigquery(self.client, "dim_country", layer='gold')
self.dims["indicator"] = read_from_bigquery(self.client, "dim_indicator", layer='gold')
self.dims["pillar"] = read_from_bigquery(self.client, "dim_pillar", layer='gold')
self.dims["time"] = read_from_bigquery(self.client, "dim_time", layer='gold')
ind_cols = ["indicator_id"]
if "direction" in self.dims["indicator"].columns:
ind_cols.append("direction")
self.df = (
self.df
.merge(self.dims["time"][["time_id", "year"]], on="time_id", how="left")
.merge(self.dims["country"][["country_id", "country_name"]], on="country_id", how="left")
.merge(self.dims["pillar"][["pillar_id", "pillar_name"]], on="pillar_id", how="left")
.merge(self.dims["indicator"][ind_cols], on="indicator_id", how="left")
)
if "direction" not in self.df.columns:
self.df["direction"] = "positive"
else:
n_null_dir = self.df["direction"].isna().sum()
if n_null_dir > 0:
                self.logger.warning(f" [DIRECTION] {n_null_dir} rows with NULL direction -> filled with 'positive'")
self.df["direction"] = self.df["direction"].fillna("positive")
dir_dist = self.df.drop_duplicates("indicator_id")["direction"].value_counts()
        self.logger.info("\n Direction distribution per indicator:")
for d, cnt in dir_dist.items():
tag = "INVERT" if _should_invert(d, self.logger, "load_data check") else "normal"
            self.logger.info(f" {d:<25} : {cnt:>3} indicators [{tag}]")
        self.logger.info(f"\n After join: {len(self.df):,} rows")
        self.logger.info(f" Countries : {self.df['country_id'].nunique()}")
        self.logger.info(f" Indicators : {self.df['indicator_id'].nunique()}")
        self.logger.info(f" Years : {int(self.df['year'].min())} - {int(self.df['year'].max())}")
# =========================================================================
# STEP 1b: Classify indicators into MDGs / SDGs
# =========================================================================
def _classify_indicators(self):
self.logger.info("\n" + "=" * 70)
        self.logger.info("STEP 1b: CLASSIFY INDICATORS -> MDGs / SDGs")
self.logger.info("=" * 70)
ind_min_year = (
self.df.groupby("indicator_id")["year"]
.min().reset_index()
.rename(columns={"year": "min_year"})
)
unique_years = sorted(ind_min_year["min_year"].unique())
        self.logger.info(f"\n Unique min_year per indicator: {unique_years}")
if len(unique_years) == 1:
gap_threshold = unique_years[0] + 1
            self.logger.info(" Only one cluster -> all indicators = MDGs")
else:
gaps = [
(unique_years[i+1] - unique_years[i], unique_years[i], unique_years[i+1])
for i in range(len(unique_years) - 1)
]
gaps.sort(reverse=True)
largest_gap_size, y_before, y_after = gaps[0]
gap_threshold = y_after
            self.logger.info(f" Largest gap: {y_before} -> {y_after} (size {largest_gap_size})")
ind_min_year["framework"] = ind_min_year["min_year"].apply(
lambda y: "MDGs" if int(y) < gap_threshold else "SDGs"
)
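        # Illustration of the gap heuristic (hypothetical min_years): for
        # [1990, 1991, 2000, 2001] the largest gap is 1991 -> 2000, so
        # gap_threshold = 2000; indicators first seen before 2000 become MDGs,
        # the rest SDGs.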
sdgs_rows = ind_min_year[ind_min_year["framework"] == "SDGs"]
self.sdgs_start_year = int(sdgs_rows["min_year"].min()) if not sdgs_rows.empty else int(self.df["year"].max()) + 1
self.logger.info(f" sdgs_start_year: {self.sdgs_start_year}")
self.mdgs_indicator_ids = set(ind_min_year[ind_min_year["framework"] == "MDGs"]["indicator_id"].tolist())
self.sdgs_indicator_ids = set(ind_min_year[ind_min_year["framework"] == "SDGs"]["indicator_id"].tolist())
self.logger.info(f" MDGs: {len(self.mdgs_indicator_ids)} indicators")
self.logger.info(f" SDGs: {len(self.sdgs_indicator_ids)} indicators")
self.df = self.df.merge(ind_min_year[["indicator_id", "framework"]], on="indicator_id", how="left")
# =========================================================================
# CORE HELPER: normalize raw values per indicator (min-max, direction-aware)
# =========================================================================
def _get_norm_value_df(self) -> pd.DataFrame:
if "framework" not in self.df.columns:
            raise ValueError("Column 'framework' is missing. Make sure _classify_indicators() runs first.")
norm_parts = []
for ind_id, grp in self.df.groupby("indicator_id"):
grp = grp.copy()
direction = str(grp["direction"].iloc[0])
do_invert = _should_invert(direction, self.logger, context=f"indicator_id={ind_id}")
valid_mask = grp["value"].notna()
n_valid = valid_mask.sum()
if n_valid < 2:
grp["norm_value"] = np.nan
norm_parts.append(grp)
continue
scaler = MinMaxScaler(feature_range=(0, 1))
normed = np.full(len(grp), np.nan)
normed[valid_mask.values] = scaler.fit_transform(
grp.loc[valid_mask, ["value"]]
).flatten()
if do_invert:
normed = np.where(np.isnan(normed), np.nan, 1.0 - normed)
grp["norm_value"] = normed
norm_parts.append(grp)
return pd.concat(norm_parts, ignore_index=True)
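    # Illustration (hypothetical indicator with direction="lower_better"): raw
    # values [5, 10, 20] min-max scale to [0.0, 0.33, 1.0], then invert to
    # [1.0, 0.67, 0.0], so the best (lowest) raw value gets the highest norm_value.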
# =========================================================================
# STEP 2: agg_pillar_composite -> Gold
# =========================================================================
def calc_pillar_composite(self) -> pd.DataFrame:
table_name = "agg_pillar_composite"
self.load_metadata[table_name]["start_time"] = datetime.now()
self.logger.info("\n" + "=" * 70)
self.logger.info(f"STEP 2: {table_name} -> [Gold] fs_asean_gold")
self.logger.info("=" * 70)
df_normed = self._get_norm_value_df()
df = (
df_normed
.groupby(["pillar_id", "pillar_name", "year"])
.agg(
pillar_norm =("norm_value", "mean"),
n_indicators=("indicator_id", "nunique"),
n_countries =("country_id", "nunique"),
)
.reset_index()
)
df["pillar_score_1_100"] = global_minmax(df["pillar_norm"])
df["rank_in_year"] = (
df.groupby("year")["pillar_score_1_100"]
.rank(method="min", ascending=False)
.astype(int)
)
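        # rank_in_year illustration (scores are hypothetical): within one year,
        # pillar scores 90, 75, 75, 60 rank 1, 2, 2, 4; method="min" gives tied
        # scores the lowest shared rank.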
df = add_yoy(df, ["pillar_id"], "pillar_score_1_100")
df["pillar_id"] = df["pillar_id"].astype(int)
df["year"] = df["year"].astype(int)
df["n_indicators"] = safe_int(df["n_indicators"], col_name="n_indicators", logger=self.logger)
df["n_countries"] = safe_int(df["n_countries"], col_name="n_countries", logger=self.logger)
df["rank_in_year"] = df["rank_in_year"].astype(int)
df["pillar_norm"] = df["pillar_norm"].astype(float)
df["pillar_score_1_100"] = df["pillar_score_1_100"].astype(float)
schema = [
bigquery.SchemaField("pillar_id", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("pillar_name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("year", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("pillar_norm", "FLOAT", mode="REQUIRED"),
bigquery.SchemaField("n_indicators", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("n_countries", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("pillar_score_1_100", "FLOAT", mode="REQUIRED"),
bigquery.SchemaField("rank_in_year", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("year_over_year_change", "FLOAT", mode="NULLABLE"),
]
rows = load_to_bigquery(self.client, df, table_name, layer='gold', write_disposition="WRITE_TRUNCATE", schema=schema)
self._finalize(table_name, rows)
return df
# =========================================================================
# STEP 3: agg_pillar_by_country -> Gold
# =========================================================================
def calc_pillar_by_country(self) -> pd.DataFrame:
table_name = "agg_pillar_by_country"
self.load_metadata[table_name]["start_time"] = datetime.now()
self.logger.info("\n" + "=" * 70)
self.logger.info(f"STEP 3: {table_name} -> [Gold] fs_asean_gold")
self.logger.info("=" * 70)
df_normed = self._get_norm_value_df()
df = (
df_normed
.groupby(["country_id", "country_name", "pillar_id", "pillar_name", "year"])
.agg(pillar_country_norm=("norm_value", "mean"))
.reset_index()
)
df["pillar_country_score_1_100"] = global_minmax(df["pillar_country_norm"])
df["rank_in_pillar_year"] = (
df.groupby(["pillar_id", "year"])["pillar_country_score_1_100"]
.rank(method="min", ascending=False)
.astype(int)
)
df = add_yoy(df, ["country_id", "pillar_id"], "pillar_country_score_1_100")
df["country_id"] = df["country_id"].astype(int)
df["pillar_id"] = df["pillar_id"].astype(int)
df["year"] = df["year"].astype(int)
df["rank_in_pillar_year"] = df["rank_in_pillar_year"].astype(int)
df["pillar_country_norm"] = df["pillar_country_norm"].astype(float)
df["pillar_country_score_1_100"] = df["pillar_country_score_1_100"].astype(float)
schema = [
bigquery.SchemaField("country_id", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("country_name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("pillar_id", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("pillar_name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("year", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("pillar_country_norm", "FLOAT", mode="REQUIRED"),
bigquery.SchemaField("pillar_country_score_1_100", "FLOAT", mode="REQUIRED"),
bigquery.SchemaField("rank_in_pillar_year", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("year_over_year_change", "FLOAT", mode="NULLABLE"),
]
rows = load_to_bigquery(self.client, df, table_name, layer='gold', write_disposition="WRITE_TRUNCATE", schema=schema)
self._finalize(table_name, rows)
return df
# =========================================================================
# STEP 4: agg_framework_by_country -> Gold
# =========================================================================
    def _calc_country_composite_inmemory(self) -> pd.DataFrame:
        """Compute the country composite in memory (not persisted to BigQuery)."""
df_normed = self._get_norm_value_df()
df = (
df_normed
.groupby(["country_id", "country_name", "year"])
.agg(
composite_score=("norm_value", "mean"),
n_indicators =("indicator_id", "nunique"),
)
.reset_index()
)
df["score_1_100"] = global_minmax(df["composite_score"])
df["rank_in_asean"] = (
df.groupby("year")["score_1_100"]
.rank(method="min", ascending=False)
.astype(int)
)
df = add_yoy(df, ["country_id"], "score_1_100")
df["country_id"] = df["country_id"].astype(int)
df["year"] = df["year"].astype(int)
df["n_indicators"] = safe_int(df["n_indicators"], col_name="n_indicators", logger=self.logger)
df["composite_score"] = df["composite_score"].astype(float)
df["score_1_100"] = df["score_1_100"].astype(float)
df["rank_in_asean"] = df["rank_in_asean"].astype(int)
return df
def calc_framework_by_country(self) -> pd.DataFrame:
table_name = "agg_framework_by_country"
self.load_metadata[table_name]["start_time"] = datetime.now()
self.logger.info("\n" + "=" * 70)
self.logger.info(f"STEP 4: {table_name} -> [Gold] fs_asean_gold")
self.logger.info("=" * 70)
country_composite = self._calc_country_composite_inmemory()
df_normed = self._get_norm_value_df()
parts = []
        # TOTAL layer
agg_total = (
country_composite[[
"country_id", "country_name", "year",
"score_1_100", "n_indicators", "composite_score"
]]
.copy()
.rename(columns={"score_1_100": "framework_score_1_100", "composite_score": "framework_norm"})
)
agg_total["framework"] = "Total"
parts.append(agg_total)
        # MDGs layer: pre-SDGs era = Total scores
pre_sdgs_rows = country_composite[country_composite["year"] < self.sdgs_start_year].copy()
if not pre_sdgs_rows.empty:
mdgs_pre = (
pre_sdgs_rows[["country_id", "country_name", "year", "score_1_100", "n_indicators", "composite_score"]]
.copy()
.rename(columns={"score_1_100": "framework_score_1_100", "composite_score": "framework_norm"})
)
mdgs_pre["framework"] = "MDGs"
parts.append(mdgs_pre)
        # MDGs layer: mixed era (MDGs indicators only, years >= sdgs_start_year)
if self.mdgs_indicator_ids:
df_mdgs_mixed = df_normed[
(df_normed["indicator_id"].isin(self.mdgs_indicator_ids)) &
(df_normed["year"] >= self.sdgs_start_year)
].copy()
if not df_mdgs_mixed.empty:
agg_mdgs_mixed = (
df_mdgs_mixed
.groupby(["country_id", "country_name", "year"])
.agg(framework_norm=("norm_value", "mean"), n_indicators=("indicator_id", "nunique"))
.reset_index()
)
if not NORMALIZE_FRAMEWORKS_JOINTLY:
agg_mdgs_mixed["framework_score_1_100"] = global_minmax(agg_mdgs_mixed["framework_norm"])
agg_mdgs_mixed["framework"] = "MDGs"
parts.append(agg_mdgs_mixed)
        # SDGs layer (years >= sdgs_start_year)
if self.sdgs_indicator_ids:
df_sdgs = df_normed[
(df_normed["indicator_id"].isin(self.sdgs_indicator_ids)) &
(df_normed["year"] >= self.sdgs_start_year)
].copy()
if not df_sdgs.empty:
agg_sdgs = (
df_sdgs
.groupby(["country_id", "country_name", "year"])
.agg(framework_norm=("norm_value", "mean"), n_indicators=("indicator_id", "nunique"))
.reset_index()
)
if not NORMALIZE_FRAMEWORKS_JOINTLY:
agg_sdgs["framework_score_1_100"] = global_minmax(agg_sdgs["framework_norm"])
agg_sdgs["framework"] = "SDGs"
parts.append(agg_sdgs)
df = pd.concat(parts, ignore_index=True)
if NORMALIZE_FRAMEWORKS_JOINTLY:
mixed_mask = (df["framework"].isin(["MDGs", "SDGs"])) & (df["year"] >= self.sdgs_start_year)
if mixed_mask.any():
df.loc[mixed_mask, "framework_score_1_100"] = global_minmax(df.loc[mixed_mask, "framework_norm"])
df = check_and_dedup(df, ["country_id", "framework", "year"], context=table_name, logger=self.logger)
df["rank_in_framework_year"] = (
df.groupby(["framework", "year"])["framework_score_1_100"]
.rank(method="min", ascending=False)
.astype(int)
)
df = add_yoy(df, ["country_id", "framework"], "framework_score_1_100")
df["country_id"] = df["country_id"].astype(int)
df["year"] = df["year"].astype(int)
df["n_indicators"] = safe_int(df["n_indicators"], col_name="n_indicators", logger=self.logger)
df["rank_in_framework_year"] = safe_int(df["rank_in_framework_year"], col_name="rank_in_framework_year", logger=self.logger)
df["framework_norm"] = df["framework_norm"].astype(float)
df["framework_score_1_100"] = df["framework_score_1_100"].astype(float)
self._validate_mdgs_equals_total(df, level="country")
schema = [
bigquery.SchemaField("country_id", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("country_name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("year", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("framework", "STRING", mode="REQUIRED"),
bigquery.SchemaField("n_indicators", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("framework_norm", "FLOAT", mode="REQUIRED"),
bigquery.SchemaField("framework_score_1_100", "FLOAT", mode="REQUIRED"),
bigquery.SchemaField("rank_in_framework_year", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("year_over_year_change", "FLOAT", mode="NULLABLE"),
]
rows = load_to_bigquery(self.client, df, table_name, layer='gold', write_disposition="WRITE_TRUNCATE", schema=schema)
self._finalize(table_name, rows)
return df
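    # Shape of the result (values hypothetical): each (country, year) appears once
    # per applicable framework, e.g. country_id=1 in 1995 yields Total and MDGs
    # rows, while the same country in 2018 yields Total, MDGs (mixed era), and SDGs.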
# =========================================================================
# STEP 5: agg_framework_asean -> Gold
# =========================================================================
def calc_framework_asean(self) -> pd.DataFrame:
table_name = "agg_framework_asean"
self.load_metadata[table_name]["start_time"] = datetime.now()
self.logger.info("\n" + "=" * 70)
self.logger.info(f"STEP 5: {table_name} -> [Gold] fs_asean_gold")
self.logger.info("=" * 70)
df_normed = self._get_norm_value_df()
country_composite = self._calc_country_composite_inmemory()
country_norm = (
df_normed.groupby(["country_id", "country_name", "year"])["norm_value"]
.mean().reset_index().rename(columns={"norm_value": "country_norm"})
)
asean_overall = (
country_norm.groupby("year")
.agg(asean_norm=("country_norm", "mean"), std_norm=("country_norm", "std"),
n_countries=("country_norm", "count"))
.reset_index()
)
asean_overall["asean_score_1_100"] = global_minmax(asean_overall["asean_norm"])
asean_comp = (
country_composite.groupby("year")["composite_score"]
.mean().reset_index().rename(columns={"composite_score": "asean_composite"})
)
asean_overall = asean_overall.merge(asean_comp, on="year", how="left")
parts = []
        # TOTAL layer
total_cols = asean_overall[["year", "asean_score_1_100", "asean_norm", "std_norm", "n_countries"]].copy()
total_cols = total_cols.rename(columns={
"asean_score_1_100": "framework_score_1_100",
"asean_norm": "framework_norm",
"n_countries": "n_countries_with_data",
})
n_ind_total = df_normed.groupby("year")["indicator_id"].nunique().reset_index().rename(columns={"indicator_id": "n_indicators"})
total_cols = total_cols.merge(n_ind_total, on="year", how="left")
total_cols["framework"] = "Total"
parts.append(total_cols)
        # MDGs layer: pre-SDGs era = Total scores
pre_sdgs = asean_overall[asean_overall["year"] < self.sdgs_start_year].copy()
if not pre_sdgs.empty:
mdgs_pre = pre_sdgs[["year", "asean_score_1_100", "asean_norm", "std_norm", "n_countries"]].copy()
mdgs_pre = mdgs_pre.rename(columns={
"asean_score_1_100": "framework_score_1_100",
"asean_norm": "framework_norm",
"n_countries": "n_countries_with_data",
})
n_ind_pre = df_normed[df_normed["year"] < self.sdgs_start_year].groupby("year")["indicator_id"].nunique().reset_index().rename(columns={"indicator_id": "n_indicators"})
mdgs_pre = mdgs_pre.merge(n_ind_pre, on="year", how="left")
mdgs_pre["framework"] = "MDGs"
parts.append(mdgs_pre)
        # MDGs layer: mixed era
if self.mdgs_indicator_ids:
df_mdgs_mixed = df_normed[
(df_normed["indicator_id"].isin(self.mdgs_indicator_ids)) &
(df_normed["year"] >= self.sdgs_start_year)
].copy()
if not df_mdgs_mixed.empty:
cn = df_mdgs_mixed.groupby(["country_id", "year"])["norm_value"].mean().reset_index().rename(columns={"norm_value": "country_norm"})
asean_mdgs = cn.groupby("year").agg(
framework_norm=("country_norm", "mean"),
std_norm=("country_norm", "std"),
n_countries_with_data=("country_id", "count"),
).reset_index()
n_ind_mdgs = df_mdgs_mixed.groupby("year")["indicator_id"].nunique().reset_index().rename(columns={"indicator_id": "n_indicators"})
asean_mdgs = asean_mdgs.merge(n_ind_mdgs, on="year", how="left")
if not NORMALIZE_FRAMEWORKS_JOINTLY:
asean_mdgs["framework_score_1_100"] = global_minmax(asean_mdgs["framework_norm"])
asean_mdgs["framework"] = "MDGs"
parts.append(asean_mdgs)
        # SDGs layer
if self.sdgs_indicator_ids:
df_sdgs = df_normed[
(df_normed["indicator_id"].isin(self.sdgs_indicator_ids)) &
(df_normed["year"] >= self.sdgs_start_year)
].copy()
if not df_sdgs.empty:
cn = df_sdgs.groupby(["country_id", "year"])["norm_value"].mean().reset_index().rename(columns={"norm_value": "country_norm"})
asean_sdgs = cn.groupby("year").agg(
framework_norm=("country_norm", "mean"),
std_norm=("country_norm", "std"),
n_countries_with_data=("country_id", "count"),
).reset_index()
n_ind_sdgs = df_sdgs.groupby("year")["indicator_id"].nunique().reset_index().rename(columns={"indicator_id": "n_indicators"})
asean_sdgs = asean_sdgs.merge(n_ind_sdgs, on="year", how="left")
if not NORMALIZE_FRAMEWORKS_JOINTLY:
asean_sdgs["framework_score_1_100"] = global_minmax(asean_sdgs["framework_norm"])
asean_sdgs["framework"] = "SDGs"
parts.append(asean_sdgs)
df = pd.concat(parts, ignore_index=True)
if NORMALIZE_FRAMEWORKS_JOINTLY:
mixed_mask = (df["framework"].isin(["MDGs", "SDGs"])) & (df["year"] >= self.sdgs_start_year)
if mixed_mask.any():
df.loc[mixed_mask, "framework_score_1_100"] = global_minmax(df.loc[mixed_mask, "framework_norm"])
df = check_and_dedup(df, ["framework", "year"], context=table_name, logger=self.logger)
df = add_yoy(df, ["framework"], "framework_score_1_100")
df["year"] = df["year"].astype(int)
df["n_indicators"] = safe_int(df["n_indicators"], col_name="n_indicators", logger=self.logger)
df["n_countries_with_data"] = safe_int(df["n_countries_with_data"], col_name="n_countries_with_data", logger=self.logger)
for col in ["framework_norm", "std_norm", "framework_score_1_100"]:
df[col] = df[col].astype(float)
self._validate_mdgs_equals_total(df, level="asean")
schema = [
bigquery.SchemaField("year", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("framework", "STRING", mode="REQUIRED"),
bigquery.SchemaField("n_indicators", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("n_countries_with_data", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("framework_norm", "FLOAT", mode="REQUIRED"),
bigquery.SchemaField("std_norm", "FLOAT", mode="NULLABLE"),
bigquery.SchemaField("framework_score_1_100", "FLOAT", mode="REQUIRED"),
bigquery.SchemaField("year_over_year_change", "FLOAT", mode="NULLABLE"),
]
rows = load_to_bigquery(self.client, df, table_name, layer='gold', write_disposition="WRITE_TRUNCATE", schema=schema)
self._finalize(table_name, rows)
return df
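    # Shape of the result (values hypothetical): one row per (framework, year),
    # e.g. ("Total", 2018), ("MDGs", 2018), ("SDGs", 2018), each holding the
    # ASEAN-wide mean of country norms plus std_norm and coverage counts.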
# =========================================================================
# HELPERS
# =========================================================================
def _validate_mdgs_equals_total(self, df: pd.DataFrame, level: str = ""):
        self.logger.info(f"\n Validate MDGs < {self.sdgs_start_year} == Total [{level}]:")
group_by = ["year"] if level.startswith("asean") else ["country_id", "year"]
mdgs_pre = df[(df["framework"] == "MDGs") & (df["year"] < self.sdgs_start_year)][group_by + ["framework_score_1_100"]].rename(columns={"framework_score_1_100": "mdgs_score"})
total_pre = df[(df["framework"] == "Total") & (df["year"] < self.sdgs_start_year)][group_by + ["framework_score_1_100"]].rename(columns={"framework_score_1_100": "total_score"})
if mdgs_pre.empty and total_pre.empty:
            self.logger.info(f" -> No data before {self.sdgs_start_year} (skipping)")
return
if mdgs_pre.empty or total_pre.empty:
            self.logger.warning(f" -> [WARNING] One side is empty: MDGs={len(mdgs_pre)}, Total={len(total_pre)}")
return
check = mdgs_pre.merge(total_pre, on=group_by)
max_diff = (check["mdgs_score"] - check["total_score"]).abs().max()
        status = "OK (identical)" if max_diff < 0.01 else f"MISMATCH! max_diff={max_diff:.6f}"
self.logger.info(f" -> {status} (n_checked={len(check)})")
def _finalize(self, table_name: str, rows_loaded: int):
self.load_metadata[table_name].update({
"rows_loaded": rows_loaded, "status": "success", "end_time": datetime.now(),
})
log_update(self.client, "DW", table_name, "full_load", rows_loaded)
self.logger.info(f"{table_name}: {rows_loaded:,} rows → [Gold] fs_asean_gold")
self.logger.info(f" Metadata → [AUDIT] etl_logs")
def _fail(self, table_name: str, error: Exception):
self.load_metadata[table_name].update({"status": "failed", "end_time": datetime.now()})
self.logger.error(f" [FAIL] {table_name}: {error}")
log_update(self.client, "DW", table_name, "full_load", 0, "failed", str(error))
# =========================================================================
# RUN
# =========================================================================
def run(self):
start = datetime.now()
self.logger.info("\n" + "=" * 70)
self.logger.info("FOOD SECURITY AGGREGATION v8.0 — 4 TABLES -> fs_asean_gold")
self.logger.info("=" * 70)
self.load_data()
self._classify_indicators()
self.calc_pillar_composite()
self.calc_pillar_by_country()
self.calc_framework_by_country()
self.calc_framework_asean()
duration = (datetime.now() - start).total_seconds()
total_rows = sum(m["rows_loaded"] for m in self.load_metadata.values())
self.logger.info("\n" + "=" * 70)
        self.logger.info("DONE")
self.logger.info("=" * 70)
        self.logger.info(f" Duration : {duration:.2f}s")
self.logger.info(f" Total rows : {total_rows:,}")
for tbl, meta in self.load_metadata.items():
            icon = "[OK]" if meta["status"] == "success" else "[FAIL]"
self.logger.info(f" {icon} {tbl:<35} {meta['rows_loaded']:>10,}")
# =============================================================================
# AIRFLOW TASK FUNCTIONS
# =============================================================================
def run_aggregation():
"""
    Airflow task: compute all aggregations from analytical_food_security.
    Called after analytical_layer_to_gold completes.
"""
from scripts.bigquery_config import get_bigquery_client
client = get_bigquery_client()
agg = FoodSecurityAggregator(client)
agg.run()
total = sum(m["rows_loaded"] for m in agg.load_metadata.values())
print(f"Aggregation completed: {total:,} total rows loaded")
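# A minimal sketch of wiring this task into a DAG; the task id and the upstream
# task object are illustrative, not taken from this repo:
#
#   from airflow.operators.python import PythonOperator
#
#   aggregate_gold = PythonOperator(
#       task_id="aggregate_gold",
#       python_callable=run_aggregation,
#   )
#   analytical_layer_to_gold_task >> aggregate_gold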
# =============================================================================
# MAIN EXECUTION
# =============================================================================
if __name__ == "__main__":
import io
if _sys.stdout.encoding and _sys.stdout.encoding.lower() not in ("utf-8", "utf8"):
_sys.stdout = io.TextIOWrapper(_sys.stdout.buffer, encoding="utf-8", errors="replace")
if _sys.stderr.encoding and _sys.stderr.encoding.lower() not in ("utf-8", "utf8"):
_sys.stderr = io.TextIOWrapper(_sys.stderr.buffer, encoding="utf-8", errors="replace")
print("=" * 70)
print("FOOD SECURITY AGGREGATION v8.0 — 4 TABLES -> fs_asean_gold")
print(f" NORMALIZE_FRAMEWORKS_JOINTLY = {NORMALIZE_FRAMEWORKS_JOINTLY}")
print("=" * 70)
logger = setup_logging()
    # Swap in the CP1252-safe emitter, but only for plain stream handlers so any
    # file handlers keep their own class behaviour.
    for handler in logger.handlers:
        if type(handler) is logging.StreamHandler:
            handler.__class__ = _SafeStreamHandler
client = get_bigquery_client()
agg = FoodSecurityAggregator(client)
agg.run()
print("\n" + "=" * 70)
    print("[OK] DONE")
print("=" * 70)