From 07c3d0a14ca12159ec89e5699f62ba3672f85a64 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Falk=20He=C3=9Fe?= <falk.hesse@uba.de>
Date: Wed, 5 Feb 2025 13:02:47 +0000
Subject: [PATCH 1/3] scraper for discharge data from the grdc

---
 deployment/harvester.toml                    |  16 ++
 deployment/manual_datasets/Wissenschaft.toml |  14 ++
 deployment/origins.toml                      |   7 +
 harvester/src/main.rs                        |   4 +-
 harvester/src/portal_grdc_bfg.rs             | 177 +++++++++++++++++
 harvester/src/website_grdc_bfg.rs            | 191 +++++++++++++++++++
 6 files changed, 408 insertions(+), 1 deletion(-)
 create mode 100644 harvester/src/portal_grdc_bfg.rs
 create mode 100644 harvester/src/website_grdc_bfg.rs

diff --git a/deployment/harvester.toml b/deployment/harvester.toml
index 8fbc989c2e..99a2c72b74 100644
--- a/deployment/harvester.toml
+++ b/deployment/harvester.toml
@@ -1281,3 +1281,19 @@ type = "csw"
 url = "https://teodoor.icg.kfa-juelich.de/geonetwork/srv/ger/csw?constraintLanguage=CQL_TEXT"
 origins = ["/Wissenschaft/Helmholtz-Gemeinschaft/Forschungszentrum Jülich/TERENO Geoportal"]
 source_url = "http://teodoor.icg.kfa-juelich.de/geonetwork/apps/search/?uuid={{id}}"
+
+[[sources]]
+name = "portal-grdc-bfg"
+type = "portal_grdc_bfg"
+url = "https://portal.grdc.bafg.de/"
+origins = ["/Bund/BfG/GRDC"]
+max_age = "1w"
+age_smear = "3d"
+
+[[sources]]
+name = "website-grdc-bfg"
+type = "website_grdc_bfg"
+url = "https://grdc.bafg.de/"
+origins = ["/Bund/BfG/GRDC"]
+max_age = "1w"
+age_smear = "3d"
diff --git a/deployment/manual_datasets/Wissenschaft.toml b/deployment/manual_datasets/Wissenschaft.toml
index d1b643c2e2..acb286236f 100644
--- a/deployment/manual_datasets/Wissenschaft.toml
+++ b/deployment/manual_datasets/Wissenschaft.toml
@@ -42,3 +42,17 @@ resources = [ { type = "Pdf", url = "https://essd.copernicus.org/articles/16/562
               { type = "WebPage", url = "https://doi.org/10.5194/essd-16-5625-2024", description = 'Link zur Artikelübersicht beim Journal "Earth System Science Data"', direct_link = false, primary_content = false }
  ]
 
+[[datasets]]
+id = "european_water_archive"
+title = "European Water Archive"
+description = 'Since its inception in 1985 the European Water Archive (EWA) collected long-term daily flow data and catchment information from more than 4000 river gauging stations in 30 countries. In October 2014 the EURO-FRIEND-Water meeting in Montpellier concluded that the EWA would no longer be updated and would be "frozen at its current state". The Global Runoff Data Centre (GRDC), Federal Institute of Hydrology, Koblenz, Germany, has hosted the European Water Archive since 2004. By release and on behalf of the data providers, the former EWA stations and flow data will be integrated into the Global Runoff Database. The archived data remain unchanged and will be provided by GRDC on special request by writing to grdc@bafg.de. Please use the GRDC station catalogue for up-to-date data of stations in Europe incl. those which have been transferred to the GRDC database. GRDB currently holds river discharge time series data from more than 3000 gauging stations in Europe with earliest records from 1806 and an average time series length of 53 years, with a range from 1 to 214 years.'
+types = [ "Dataset" ]
+license = "AllRightsReserved"
+origins = ["/Bund/BfG/GRDC"]
+language = "English"
+source_url = "https://grdc.bafg.de/"
+resources = [ { type = "MicrosoftExcelSpreadsheet", url = "https://grdc.bafg.de/downloads/EWA.xlsx", description = "Station Catalogue", direct_link = true, primary_content = true },
+              { type = "Pdf", url = "https://grdc.bafg.de/downloads/grdc_ewa_summary_regions.pdf", description = "Summary Statistics by WMO region", direct_link = true, primary_content = true },
+              { type = "Pdf", url = "https://grdc.bafg.de/downloads/grdc_ewa_summary_countries.pdf", description = "Summary Statistics by Country", direct_link = true, primary_content = true },
+              { type = "Pdf", url = "https://grdc.bafg.de/downloads/grdc_ewa_summary_basins.pdf", description = "Summary Statistics by WMO basin", direct_link = true, primary_content = true },
+ ]
diff --git a/deployment/origins.toml b/deployment/origins.toml
index e1b1ee3f56..f9e0031781 100644
--- a/deployment/origins.toml
+++ b/deployment/origins.toml
@@ -324,6 +324,13 @@ contact_url = "https://www.wasserblick.net/servlet/is/32282/"
 email = "langstengel@bafg.de"
 description = "WasserBLIcK ist eine umfassende nationale Kommunikations- und Berichtsplattform der Bund/Länder-Arbeitsgemeinschaft Wasser (LAWA)."
 
+["Bund/BfG/GRDC"]
+name = "Global Runoff Data Centre"
+about_url = "https://grdc.bafg.de/imprint/"
+contact_url = "https://grdc.bafg.de/help/contact/"
+email = "grdc@bafg.de"
+description = "Das Global Runoff Data Centre archiviert weltweite Abflussdaten mit mittleren Tages- und Monatswerten."
+
 ["Bund/BfG/Undine"]
 name = "Informationsplattform Undine"
 about_url = "https://undine.bafg.de/index.html"
diff --git a/harvester/src/main.rs b/harvester/src/main.rs
index e87f2834cd..bff611ea78 100644
--- a/harvester/src/main.rs
+++ b/harvester/src/main.rs
@@ -55,6 +55,7 @@ mod oai;
 mod odl_info_bfs;
 mod pegelonline_gdws;
 mod piveau_search;
+mod portal_grdc_bfg;
 mod probas_uba;
 mod rekis;
 mod ressourceneffizienz_bmuv;
@@ -85,6 +86,7 @@ mod website_base;
 mod website_bfn;
 mod website_bfs;
 mod website_bge;
+mod website_grdc_bfg;
 mod website_publications_uba;
 mod website_uba;
 mod wikidata;
@@ -240,7 +242,7 @@ dispatch_harvester!(
     uvp_verbund
     website_bfn wisia_bfn flora_web_bfn naturdetektive_bfn rote_liste_zentrum_bfn ffh_vp_info_bfn envir_acts_bfn
     aktion_flaeche_uba diffuse_quellen_uba gewaesserbewertung_uba gewaesserberechnung_uba giir_uba kommunales_abwasser_uba probas_uba ufordat_uba umweltchronik_uba thru_de_uba stadt_land_plus_uba website_uba luftdaten_uba mudab_uba runder_tisch_meeresmuell_uba blauer_engel_uba upb_uba upb_website_uba uip_uba uvp_portal_uba website_publications_uba
-    wasser_de_bfg wasserblick_bfg undine_bfg
+    portal_grdc_bfg website_grdc_bfg wasser_de_bfg wasserblick_bfg undine_bfg
     website_bfs doris_bfs sar_bfs odl_info_bfs
     website_base endlagersuche_base
     elwis_gdws pegelonline_gdws
diff --git a/harvester/src/portal_grdc_bfg.rs b/harvester/src/portal_grdc_bfg.rs
new file mode 100644
index 0000000000..a84a94a292
--- /dev/null
+++ b/harvester/src/portal_grdc_bfg.rs
@@ -0,0 +1,177 @@
+use std::io::{Cursor, Read};
+
+use anyhow::{anyhow, Error, Result};
+use calamine::{open_workbook_from_rs, Data, DataType, Reader, Xlsx};
+use cap_std::fs::Dir;
+use smallvec::smallvec;
+use time::{Date, Month};
+use zip::ZipArchive;
+
+use harvester::{
+    client::Client,
+    fetch_many,
+    utilities::{make_key, point_like_bounding_box},
+    write_dataset, Source,
+};
+use metadaten::{
+    dataset::{
+        r#type::{Domain, Station, Type},
+        Dataset, Language, License, Organisation, OrganisationRole, Region, SourceUrlExplainer,
+    },
+    wise::WISE,
+};
+
+pub async fn harvest(dir: &Dir, client: &Client, source: &Source) -> Result<(usize, usize, usize)> {
+    let station_list = fetch_station_list(client, source).await?;
+
+    let count = station_list.len();
+
+    let (results, errors) = fetch_many(0, 0, station_list, |station| {
+        translate_station_dataset(dir, client, source, station)
+    })
+    .await;
+
+    Ok((count, results, errors))
+}
+
+async fn fetch_station_list(client: &Client, source: &Source) -> Result<Vec<Item>> {
+    let url = source.url.join("grdc/grdc_stations.zip")?;
+
+    let bytes = client
+        .make_request(
+            source,
+            "station_list".to_owned(),
+            Some(&url),
+            |client| async {
+                let bytes = client
+                    .get(url.clone())
+                    .send()
+                    .await?
+                    .error_for_status()?
+                    .bytes()
+                    .await?;
+
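+                // the download is a ZIP archive that ships the station catalogue as a single XLSX file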
+                let mut archive = ZipArchive::new(Cursor::new(&*bytes))?;
+
+                let mut bytes = Vec::new();
+
+                archive
+                    .by_name("GRDC_Stations.xlsx")?
+                    .read_to_end(&mut bytes)?;
+
+                Ok::<_, Error>(bytes)
+            },
+        )
+        .await?;
+
+    let mut workbook: Xlsx<_> = open_workbook_from_rs(Cursor::new(&*bytes))?;
+    let worksheet = workbook.worksheet_range("station_catalogue")?;
+
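+    // column indices used below (assumed from the layout of GRDC_Stations.xlsx):
+    // 0 = GRDC station number, 3 = river, 4 = station name, 5 = country code,
+    // 6 = latitude, 7 = longitude, 18/19 = first/last year of the time series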
+    let station_list = worksheet
+        .rows()
+        .filter(|row| row[5] == "DE")
+        .map(|row| Item {
+            station_id: row[0].to_string(),
+            river: row[3].to_string(),
+            name: row[4].to_string(),
+            lat: row[6].clone(),
+            lon: row[7].clone(),
+            start_year: row[18].clone(),
+            end_year: row[19].clone(),
+        })
+        .collect::<Vec<_>>();
+
+    Ok(station_list)
+}
+
+async fn translate_station_dataset(
+    dir: &Dir,
+    client: &Client,
+    source: &Source,
+    item: Item,
+) -> Result<(usize, usize, usize)> {
+    let key = make_key(&item.station_id).into_owned();
+
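+    // the portal exposes no per-station landing pages (assumption), so every dataset links
+    // to the generic data download page and the description explains how to find the station there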
+    let url = source
+        .url
+        .join("applications/public.html?publicuser=PublicUser#dataDownload/Stations")?;
+
+    let title = format!("Messstation {} am Fluss {}", item.name, item.river);
+    let description = format!(
+        r#"Dieser Datensatz enthält Abflussdaten der Messstation {} am Fluss {}.
+        Die Daten werden bereitgestellt vom Global Runoff Data Centre (GRDC).
+        Um die Rohdaten für diese Station zu erhalten, muss das Data Portal des GRDC ({}) verwendet werden.
+        Dort kann man entweder anhand des Station Name: {} oder der Station Number: {} nach den Daten suchen."#,
+        item.name, item.river, url, item.name, item.station_id
+    );
+
+    let types = smallvec![Type::Measurements {
+        domain: Domain::Rivers,
+        station: Some(Station {
+            id: Some(item.station_id.into()),
+            ..Default::default()
+        }),
+        measured_variables: smallvec!["Abfluss".to_owned()],
+        methods: Default::default(),
+    }];
+
+    let region = Region::Other(item.name.into());
+    let mut regions = smallvec![region];
+
+    let lat = item.lat.as_f64().ok_or_else(|| anyhow!("Missing lat"))?;
+    let lon = item.lon.as_f64().ok_or_else(|| anyhow!("Missing lon"))?;
+
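+    // a gauging station is a point location, so the bounding box collapses to a point;
+    // the WISE shapes contribute the surrounding watershed as an additional region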
+    let bounding_boxes = smallvec![point_like_bounding_box(lat, lon)];
+    regions.extend(WISE.match_shape(lon, lat).map(Region::Watershed));
+
+    // data source only contains start and end year of the time series
+    let start_year = item
+        .start_year
+        .as_i64()
+        .ok_or_else(|| anyhow!("Missing start year"))?;
+    let end_year = item
+        .end_year
+        .as_i64()
+        .ok_or_else(|| anyhow!("Missing start year"))?;
+
+    let start_date = Date::from_calendar_date(start_year as i32, Month::January, 1)?;
+    let end_date = Date::from_calendar_date(end_year as i32, Month::December, 31)?;
+    let time_ranges = smallvec![(start_date, end_date).into()];
+
+    let provider = Organisation::WikiData {
+        identifier: 119010386, // GRDC
+        role: OrganisationRole::Publisher,
+    };
+
+    let dataset = Dataset {
+        title,
+        description: Some(description),
+        types,
+        bounding_boxes,
+        regions,
+        time_ranges,
+        organisations: smallvec![provider],
+        language: Language::English,
+        license: License::OtherClosed,
+        origins: source.origins.clone(),
+        source_url: url.into(),
+        source_url_explainer: SourceUrlExplainer::CopyStationId(
+            "zum Datenportal: bitte dort die Suchfunktion nutzen (Station Number wird automatisch kopiert)"
+                .to_owned(),
+        ),
+        ..Default::default()
+    };
+
+    write_dataset(dir, client, source, key, dataset).await
+}
+
+#[derive(Debug, Clone)]
+struct Item {
+    river: String,
+    name: String,
+    station_id: String,
+    lat: Data,
+    lon: Data,
+    start_year: Data,
+    end_year: Data,
+}
diff --git a/harvester/src/website_grdc_bfg.rs b/harvester/src/website_grdc_bfg.rs
new file mode 100644
index 0000000000..78c17cfdd1
--- /dev/null
+++ b/harvester/src/website_grdc_bfg.rs
@@ -0,0 +1,191 @@
+use anyhow::Result;
+use cap_std::fs::Dir;
+use scraper::{Html, Selector};
+use smallvec::smallvec;
+use time::{macros::format_description, Date};
+
+use harvester::{
+    client::Client,
+    fetch_many, selectors,
+    utilities::{collect_text, make_key, select_text},
+    write_dataset, Source,
+};
+use metadaten::dataset::{
+    r#type::{TextType, Type},
+    Dataset, Language, License, Organisation, OrganisationRole, Resource, ResourceType,
+};
+
+pub async fn harvest(dir: &Dir, client: &Client, source: &Source) -> Result<(usize, usize, usize)> {
+    let selectors = &Selectors::default();
+
+    let news_list = fetch_news_list(client, source, selectors).await?;
+    let mut count = news_list.len();
+    let (results, errors) = fetch_many(0, 0, news_list, |news_item| {
+        translate_news_dataset(dir, client, source, news_item, selectors)
+    })
+    .await;
+
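+    // carry the news counts into the second pass so results and errors accumulate across both lists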
+    let publication_list = fetch_publication_list(client, source, selectors).await?;
+    count += publication_list.len();
+    let (results, errors) = fetch_many(results, errors, publication_list, |publication_item| {
+        translate_publication_dataset(dir, client, source, publication_item)
+    })
+    .await;
+
+    Ok((count, results, errors))
+}
+
+async fn fetch_news_list(
+    client: &Client,
+    source: &Source,
+    selectors: &Selectors,
+) -> Result<Vec<Item>> {
+    let url = source.url.join("news/")?;
+
+    let text = client
+        .fetch_text(source, "news_list".to_owned(), &url)
+        .await?;
+    let document = Html::parse_document(&text);
+
+    let item_list = document
+        .select(&selectors.news_item)
+        .map(|element| {
+            let href = element.attr("href").unwrap();
+            let title = collect_text(element.text());
+
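+            // links on the overview page are relative and prefixed with "../" (assumed markup);
+            // stripping the prefix lets them resolve against the site root when joined with the base URL later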
+            let url = href.trim_start_matches("../").to_owned();
+
+            Ok(Item { url, title })
+        })
+        .collect::<Result<Vec<_>>>()?;
+
+    Ok(item_list)
+}
+
+async fn translate_news_dataset(
+    dir: &Dir,
+    client: &Client,
+    source: &Source,
+    item: Item,
+    selectors: &Selectors,
+) -> Result<(usize, usize, usize)> {
+    let key = make_key(&item.url).into_owned();
+    let url = source.url.join(&item.url)?;
+
+    let text = client.fetch_text(source, key.clone(), &url).await?;
+    let document = Html::parse_document(&text);
+
+    let description = select_text(&document, &selectors.news_description);
+    let date = select_text(&document, &selectors.news_date);
+
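+    // the news pages are assumed to print dates like "February 11, 2025"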
+    let issued = Date::parse(
+        &date,
+        format_description!("[month repr:long] [day padding:none], [year]"),
+    )?;
+
+    let provider = Organisation::WikiData {
+        identifier: 119010386, // GRDC
+        role: OrganisationRole::Publisher,
+    };
+
+    let dataset = Dataset {
+        title: item.title,
+        description: Some(description),
+        types: smallvec![Type::Text {
+            text_type: TextType::News,
+        }],
+        issued: Some(issued),
+        organisations: smallvec![provider],
+        language: Language::English,
+        license: License::OtherClosed,
+        origins: source.origins.clone(),
+        source_url: url.into(),
+        ..Default::default()
+    };
+
+    write_dataset(dir, client, source, key, dataset).await
+}
+
+async fn fetch_publication_list(
+    client: &Client,
+    source: &Source,
+    selectors: &Selectors,
+) -> Result<Vec<Item>> {
+    let url = source.url.join("publications/reports/")?;
+
+    let text = client
+        .fetch_text(source, "publication_list".to_owned(), &url)
+        .await?;
+    let document = Html::parse_document(&text);
+
+    let item_list = document
+        .select(&selectors.publication_all)
+        .map(|element| {
+            let href = element.attr("href").unwrap();
+            let title = collect_text(element.text());
+
+            let url = href.trim_start_matches("../").to_owned();
+
+            Ok(Item { url, title })
+        })
+        .collect::<Result<Vec<_>>>()?;
+
+    Ok(item_list)
+}
+
+async fn translate_publication_dataset(
+    dir: &Dir,
+    client: &Client,
+    source: &Source,
+    item: Item,
+) -> Result<(usize, usize, usize)> {
+    let key = make_key(&item.title).into_owned();
+    let url = source.url.join(&item.url)?;
+
+    let description = "Until 2015, GRDC has published annual reports including GRDC meeting reports. Those can be accessed via the link.".to_owned();
+
+    let provider = Organisation::WikiData {
+        identifier: 119010386, // GRDC
+        role: OrganisationRole::Publisher,
+    };
+
+    let resources = smallvec![Resource {
+        r#type: ResourceType::Pdf,
+        description: Some("Report of the GRDC".to_owned()),
+        url: url.to_string(),
+        primary_content: true,
+        ..Default::default()
+    }];
+
+    let dataset = Dataset {
+        title: item.title,
+        description: Some(description),
+        types: smallvec![Type::Text {
+            text_type: TextType::Report,
+        }],
+        resources,
+        organisations: smallvec![provider],
+        language: Language::English,
+        license: License::OtherClosed,
+        origins: source.origins.clone(),
+        source_url: url.into(),
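+        // the reports overview is plain HTML with PDF links, hence not a machine-readable source (assumed meaning of the flag)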
+        machine_readable_source: false,
+        ..Default::default()
+    };
+
+    write_dataset(dir, client, source, key, dataset).await
+}
+
+#[derive(Debug, Clone)]
+struct Item {
+    title: String,
+    url: String,
+}
+
+selectors! {
+    publication_all: ".table a[href]",
+    publication_link: "a",
+    news_item: ".listing-title .no-external",
+    news_description: ".quarto-figure-center+ p, p+ p",
+    news_date: ".date",
+}
-- 
GitLab


From d59e1ac5f0faf892f2984710e1c1e7c7c5193dc3 Mon Sep 17 00:00:00 2001
From: Maximilian Berthold <maximilian.berthold@uba.de>
Date: Tue, 11 Feb 2025 11:59:28 +0000
Subject: [PATCH 2/3] add separate instance-of (P31) query to wikidata to
 harvest more organisations

---
 harvester/src/wikidata.rs | 59 ++++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 26 deletions(-)

diff --git a/harvester/src/wikidata.rs b/harvester/src/wikidata.rs
index bfe9f44922..49a4c57062 100644
--- a/harvester/src/wikidata.rs
+++ b/harvester/src/wikidata.rs
@@ -59,19 +59,24 @@ async fn fetch_organisations(client: &Client, source: &Source) -> Result<HashSet
         387917, // administrative divisions of Germany (Q387917) entities in the administrative structure of Germany
     ];
 
+    const CLASSES: &[u64] = &[
+        5227240, // collection of numeric and/or other data sets for secondary use in research
+    ];
+
     let mut identifiers = HashSet::new();
 
     for property in PROPERTIES {
         for legal_form in LEGAL_FORMS {
-            if let Err(err) = fetch_organisation_legal_form(
-                client,
-                source,
-                &mut identifiers,
-                *property,
-                *legal_form,
-            )
-            .await
-            {
+            let query = format!(
+                r#"SELECT DISTINCT ?item WHERE {{
+                    ?item (wdt:P{property}/(wdt:P279*)) wd:Q{legal_form} ;
+                    wdt:P17 wd:Q183 . # Germany (Q183)
+                }}"#
+            );
+
+            let key = format!("organisations-{property}-{legal_form}.isol");
+
+            if let Err(err) = fetch_entities(client, source, &mut identifiers, &query, key).await {
                 tracing::error!(
                     "Failed to fetch legal form {legal_form} via property {property}: {err:#}"
                 );
@@ -79,36 +84,38 @@ async fn fetch_organisations(client: &Client, source: &Source) -> Result<HashSet
         }
     }
 
+    for class in CLASSES {
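+        // additionally collect items that are an instance (P31) of one of the classes above,
+        // or of any of their subclasses (P279*); unlike the legal-form queries, this query
+        // is not restricted to Germany (no wdt:P17 wd:Q183 filter)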
+        let query = format!(
+            r#"SELECT DISTINCT ?item WHERE {{
+                    ?item (wdt:P31/(wdt:P279*)) wd:Q{class}
+            }}"#
+        );
+
+        let key = format!("organisations-31-{class}.isol");
+
+        if let Err(err) = fetch_entities(client, source, &mut identifiers, &query, key).await {
+            tracing::error!("Failed to fetch class {class}: {err:#}");
+        }
+    }
+
     Ok(identifiers)
 }
 
-async fn fetch_organisation_legal_form(
+async fn fetch_entities(
     client: &Client,
     source: &Source,
     identifiers: &mut HashSet<u64>,
-    property: u64,
-    legal_form: u64,
+    query: &str,
+    key: String,
 ) -> Result<()> {
-    let query = format!(
-        r#"SELECT DISTINCT ?item WHERE {{
-    ?item (wdt:P{property}/(wdt:P279*)) wd:Q{legal_form} ;
-        wdt:P17 wd:Q183 . # Germany (Q183)
-}}"#
-    );
-
     let mut url = source.url.clone();
 
     url.query_pairs_mut()
-        .append_pair("query", &query)
+        .append_pair("query", query)
         .append_pair("format", "json");
 
     let bytes = client
-        .checked_fetch_bytes(
-            source,
-            timeout_exception,
-            format!("organisations-{property}-{legal_form}.isol"),
-            &url,
-        )
+        .checked_fetch_bytes(source, timeout_exception, key, &url)
         .await?;
 
     let response = from_slice::<SparqlResponse>(&bytes)?;
-- 
GitLab


From df336b2c4bbc3be305b10cfab02cedeb52ea32ae Mon Sep 17 00:00:00 2001
From: Maximilian Berthold <maximilian.berthold@uba.de>
Date: Tue, 11 Feb 2025 15:06:12 +0000
Subject: [PATCH 3/3] explicitly handle user expectation regarding forwarding
 to the data source by providing the necessary ID

---
 harvester/src/portal_grdc_bfg.rs | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/harvester/src/portal_grdc_bfg.rs b/harvester/src/portal_grdc_bfg.rs
index a84a94a292..02bd9e507d 100644
--- a/harvester/src/portal_grdc_bfg.rs
+++ b/harvester/src/portal_grdc_bfg.rs
@@ -96,12 +96,15 @@ async fn translate_station_dataset(
         .url
         .join("applications/public.html?publicuser=PublicUser#dataDownload/Stations")?;
 
-    let title = format!("Messstation {} am Fluss {}", item.name, item.river);
+    let title = format!(
+        "Messstation {} ('Station Number': {}) am Fluss {}",
+        item.name, item.station_id, item.river
+    );
     let description = format!(
         r#"Dieser Datensatz enthält Abflussdaten der Messstation {} am Fluss {}.
         Die Daten werden bereitgestellt vom Global Runoff Data Centre (GRDC).
         Um die Rohdaten für diese Station zu erhalten, muss das Data Portal des GRDC ({}) verwendet werden.
-        Dort kann man entweder anhand des Station Name: {} oder der Station Number: {} nach den Daten suchen."#,
+        Dort kann man entweder anhand des 'Station Name': {} oder der 'Station Number': {} nach den Daten suchen."#,
         item.name, item.river, url, item.name, item.station_id
     );
 
@@ -156,8 +159,7 @@ async fn translate_station_dataset(
         origins: source.origins.clone(),
         source_url: url.into(),
         source_url_explainer: SourceUrlExplainer::CopyStationId(
-            "zum Datenportal: bitte dort die Suchfunktion nutzen (Station Number wird automatisch kopiert)"
-                .to_owned(),
+            "zum Datenportal: bitte dort mit 'Station Number' die Suchfunktion nutzen".to_owned(),
         ),
         ..Default::default()
     };
-- 
GitLab