feat: switch to local full surge sync and local merge-convert pipeline

This commit is contained in:
袁震
2026-04-06 13:53:11 +08:00
parent f8ff7279c6
commit f1b218f57d
8 changed files with 169 additions and 697 deletions
+4
View File
@@ -9,6 +9,7 @@ on:
- main - main
paths: paths:
- main.py - main.py
- scripts/sync_surge_full.sh
- config.toml - config.toml
- config.json - config.json
- .gitea/workflows/generate-rules.yml - .gitea/workflows/generate-rules.yml
@@ -50,7 +51,10 @@ jobs:
- name: Generate rules - name: Generate rules
env: env:
GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }} GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
UPSTREAM_REF: ${{ vars.UPSTREAM_REF }}
run: | run: |
UPSTREAM_REF="${UPSTREAM_REF:-master}"
bash scripts/sync_surge_full.sh
if [ -f config.toml ]; then if [ -f config.toml ]; then
python3 main.py --config config.toml python3 main.py --config config.toml
else else
+1
View File
@@ -1,3 +1,4 @@
__pycache__/ __pycache__/
*.pyc *.pyc
config.toml config.toml
upstream/
+21 -3
View File
@@ -2,8 +2,9 @@
一个最小可用的规则生成器: 一个最小可用的规则生成器:
- 数据源来自 **Gitea** 仓库 - 先全量拉取上游 `ios_rule_script``Surge` 规则到本地缓存
- 输入按目录分类(默认读取 `rule/Surge/<Name>/<Name>.list` - 再在本地做合并去重并转换多渠道
- 输入按目录分类(默认读取 `upstream/rule/Surge/<Name>/...`
- 输出仅包含你要的两种格式: - 输出仅包含你要的两种格式:
- `dist/surge/<Name>.list` - `dist/surge/<Name>.list`
- `dist/loon/<Name>.list` - `dist/loon/<Name>.list`
@@ -22,7 +23,7 @@
## 快速开始 ## 快速开始
仓库已内置可直接跑的默认配置文件 `config.json`指向 `yuanzhen869/ios-rule-script-full` 全量源)。 仓库已内置可直接跑的默认配置文件 `config.json`本地源模式,默认读 `upstream/`)。
1. 复制配置: 1. 复制配置:
@@ -46,6 +47,12 @@ export GITEA_TOKEN='your-token'
如果数据源仓库是公开的(当前默认就是公开源),可以不设置 token。 如果数据源仓库是公开的(当前默认就是公开源),可以不设置 token。
默认配置会生成约 600+ 分类(当前为 667 个可直接生成的分类)。 默认配置会生成约 600+ 分类(当前为 667 个可直接生成的分类)。
3.1 先同步 Surge 全量源到本地缓存:
```bash
bash scripts/sync_surge_full.sh
```
4. 生成全部分类: 4. 生成全部分类:
```bash ```bash
@@ -89,6 +96,16 @@ USER-AGENT,*youtube*
## 转换规则说明 ## 转换规则说明
### 基础合并策略(Surge
每个分类优先按下面三份源文件做合并去重(按顺序):
1. `<Name>.list`
2. `<Name>_Domain.list`
3. `<Name>_Resolve.list`
如果某分类不满足上述结构,会自动回退到 `_All.list` 等可用文件。
- Surge 输出:保留源规则(去重、清理空白) - Surge 输出:保留源规则(去重、清理空白)
- Loon 输出:基于 Surge 规则直接输出(同样去重、清理空白) - Loon 输出:基于 Surge 规则直接输出(同样去重、清理空白)
- Clash 输出: - Clash 输出:
@@ -123,6 +140,7 @@ USER-AGENT,*youtube*
3. 推送到 `main` 后会自动执行;也可在 Actions 页面手动触发 3. 推送到 `main` 后会自动执行;也可在 Actions 页面手动触发
当前定时表达式是 `0 3 * * *`(UTC),对应北京时间(UTC+8)每天 `11:00` 当前定时表达式是 `0 3 * * *`(UTC),对应北京时间(UTC+8)每天 `11:00`
工作流会先执行 `scripts/sync_surge_full.sh` 拉取上游 Surge 全量数据,再生成多渠道规则。
### 发布到独立仓库/分支(已内置) ### 发布到独立仓库/分支(已内置)
+2
View File
@@ -7,6 +7,8 @@
"token_env": "GITEA_TOKEN" "token_env": "GITEA_TOKEN"
}, },
"source": { "source": {
"mode": "local",
"local_root": "upstream",
"root": "rule/Surge", "root": "rule/Surge",
"filename_pattern": "{name}.list", "filename_pattern": "{name}.list",
"include_categories": [], "include_categories": [],
+5 -1
View File
@@ -6,7 +6,11 @@ ref = "main"
token_env = "GITEA_TOKEN" token_env = "GITEA_TOKEN"
[source] [source]
# Usually this is where Surge source rules are stored in your Gitea repo. # mode=gitea: read source via Gitea API
# mode=local: read source from local filesystem cache (recommended with sync script)
mode = "local"
local_root = "upstream"
# Usually this is where Surge source rules are stored.
root = "rule/Surge" root = "rule/Surge"
filename_pattern = "{name}.list" filename_pattern = "{name}.list"
include_categories = [] include_categories = []
+6 -669
View File
@@ -9,676 +9,13 @@
"source": { "source": {
"root": "rule/Surge", "root": "rule/Surge",
"filename_pattern": "{name}.list", "filename_pattern": "{name}.list",
"include_categories": [ "include_categories": [],
"115", "exclude_categories": [
"12306", "Cloud",
"1337x", "Assassin'sCreed"
"17173",
"178",
"17zuoye",
"2KGames",
"360",
"36kr",
"3Type",
"3dm",
"4399",
"4Paradigm",
"4chan",
"51Job",
"51nod",
"56",
"58TongCheng",
"6JianFang",
"6park",
"8btc",
"9News",
"9to5",
"ABC",
"AFP",
"ALJazeera",
"AMD",
"AMP",
"AOL",
"APKCombo",
"ATTWatchTV",
"Abema",
"AbemaTV",
"AcFun",
"Accuweather",
"Acer",
"Acplay",
"Actalis",
"AdColony",
"AdGuardSDNSFilter",
"AddToAny",
"Addthis",
"Adidas",
"Adobe",
"AdobeActivation",
"Advertising",
"AdvertisingLite",
"AdvertisingMiTV",
"AdvertisingTest",
"Aerogard",
"Afdian",
"Agora",
"AiQiCha",
"AirChina",
"AirWick",
"Akamai",
"Ali213",
"AliPay",
"Alibaba",
"All4",
"Amazon",
"AmazonCN",
"AmazonIP",
"AmazonPrimeVideo",
"AmazonTrust",
"Americasvoice",
"AnTianKeJi",
"Anaconda",
"AnandTech",
"Android",
"Anime",
"Anjuke",
"Anonv",
"Anthropic",
"Antutu",
"Apifox",
"Apkpure",
"AppLovin",
"AppStore",
"Apple",
"AppleDaily",
"AppleDev",
"AppleFirmware",
"AppleHardware",
"AppleID",
"AppleMail",
"AppleMedia",
"AppleMusic",
"AppleNews",
"AppleProxy",
"AppleTV",
"Arphic",
"Asahi",
"AsianMedia",
"Atlassian",
"Atomdata",
"BBC",
"BMW",
"BOC",
"BOCOM",
"Bahamut",
"BaiDuTieBa",
"BaiFenDian",
"BaiShanYunKeJi",
"Baidu",
"BaoFengYingYin",
"BardAI",
"Battle",
"BeStore",
"Beats",
"BesTV",
"Bestbuy",
"BianFeng",
"BiliBili",
"BiliBiliIntl",
"Binance",
"Bing",
"Blizzard",
"BlockHttpDNS",
"Bloomberg",
"Blued",
"BoXun",
"Bootcss",
"BrightCove",
"BritboxUK",
"Buypass",
"ByteDance",
"CAS",
"CBS",
"CCB",
"CCTV",
"CEB",
"CETV",
"CGB",
"CHT",
"CIBN",
"CKJR",
"CMB",
"CNKI",
"CNN",
"CNNIC",
"CSDN",
"CWSeed",
"CableTV",
"CaiNiao",
"CaiXinChuanMei",
"Cake",
"Camera360",
"Canon",
"ChengTongWangPan",
"China",
"ChinaASN",
"ChinaDNS",
"ChinaIPs",
"ChinaIPsBGP",
"ChinaMax",
"ChinaMaxNoIP",
"ChinaMaxNoMedia",
"ChinaMedia",
"ChinaMobile",
"ChinaNews",
"ChinaNoMedia",
"ChinaTelecom",
"ChinaTest",
"ChinaUnicom",
"Chromecast",
"ChuangKeTie",
"ChunYou",
"Cisco",
"Civitai",
"Classic",
"Claude",
"Cloudflare",
"Cloudflarecn",
"Clubhouse",
"ClubhouseIP",
"Cnet",
"Collabora",
"Comodo",
"Contentful",
"Coolapk",
"Copilot",
"Crypto",
"Cryptocurrency",
"CyberTrust",
"DAZN",
"DMM",
"DNS",
"DaMai",
"Dailymail",
"Dailymotion",
"DanDanZan",
"Dandanplay",
"DangDang",
"Dedao",
"Deepin",
"Deezer",
"Dell",
"Developer",
"DiDi",
"DiLianWangLuo",
"DiSiFanShi",
"DiabloIII",
"DianCeWangKe",
"DigiCert",
"DigitalOcean",
"DingTalk",
"DingXiangYuan",
"Direct",
"Discord",
"DiscoveryPlus",
"Disney",
"Disqus",
"Docker",
"Domob",
"Dood",
"DouBan",
"DouYin",
"Douyu",
"Download",
"Dropbox",
"DtDNS",
"Dubox",
"Duckduckgo",
"DuoWan",
"Duolingo",
"DynDNS",
"Dynu",
"EA",
"EHGallery",
"EastMoney",
"EasyPrivacy",
"Electron",
"Eleme",
"Embl",
"Emby",
"Emojipedia",
"EncoreTVB",
"Entrust",
"Epic",
"Espn",
"FOXNOW",
"FOXPlus",
"Facebook",
"FanFou",
"FangZhengDianZi",
"Faronics",
"FeiZhu",
"FengHuangWang",
"FengXiaWangLuo",
"Figma",
"Fiio",
"FindMy",
"FitnessPlus",
"FlipBoard",
"Flurry",
"Fox",
"FreeCodeCamp",
"FuboTV",
"Funshion",
"Game",
"GaoDe",
"Garena",
"Geely",
"Gemini",
"Gettyimages",
"Gigabyte",
"GitBook",
"GitHub",
"GitLab",
"Gitee",
"Global",
"GlobalMedia",
"GlobalScholar",
"GlobalSign",
"Gog",
"Google",
"GoogleDrive",
"GoogleEarth",
"GoogleFCM",
"GoogleSearch",
"GoogleVoice",
"GovCN",
"Gucci",
"GuiGuDongLi",
"HBO",
"HBOAsia",
"HBOHK",
"HBOUSA",
"HKBN",
"HKOpenTV",
"HKedcity",
"HP",
"HWTV",
"HaiNanHangKong",
"HamiVideo",
"HanYi",
"HashiCorp",
"Haveibeenpwned",
"HeMa",
"Hearthstone",
"HeroesoftheStorm",
"Heroku",
"HibyMusic",
"Hijacking",
"Himalaya",
"Hkgolden",
"HoYoverse",
"Hpplay",
"HuYa",
"HuaShuTV",
"HuanJu",
"Huawei",
"Huffpost",
"Hulu",
"HuluJP",
"HuluUSA",
"HunanTV",
"Hupu",
"IBM",
"ICBC",
"IKEA",
"IMDB",
"IPTVMainland",
"IPTVOther",
"ITV",
"Identrust",
"Imgur",
"Instagram",
"Intel",
"Intercom",
"JOOX",
"Japonx",
"Jetbrains",
"Jfrog",
"JiGuangTuiSong",
"JianGuoYun",
"JianShu",
"JinJiangWenXue",
"JingDong",
"Jquery",
"Jsdelivr",
"JueJin",
"Jwplayer",
"KKBOX",
"KKTV",
"KakaoTalk",
"Kantv",
"Keep",
"KingSmith",
"Kingsoft",
"KouDaiShiShang",
"Ku6",
"KuKeMusic",
"KuaiDi100",
"KuaiShou",
"KuangShi",
"KugouKuwo",
"LG",
"Lan",
"LanZouYun",
"LastFM",
"LastPass",
"LeJu",
"LeTV",
"Lenovo",
"LiTV",
"LianMeng",
"Limelight",
"Line",
"LineTV",
"Linguee",
"LinkedIn",
"Linux",
"LivePerson",
"Logitech",
"LondonReal",
"LuDaShi",
"LvMiLianChuang",
"MEGA",
"MIUIPrivacy",
"MOMOShop",
"MOOMusic",
"MOOV",
"Mail",
"Mailru",
"Majsoul",
"Manorama",
"Maocloud",
"Marketing",
"McDonalds",
"MeWatch",
"MeiTu",
"MeiTuan",
"MeiZu",
"MiWu",
"Microsoft",
"MicrosoftEdge",
"Migu",
"MingLueZhaoHui",
"Mogujie",
"Mojitianqi",
"Movefree",
"Mozilla",
"My5",
"NBC",
"NGA",
"NGAA",
"NTPService",
"NYPost",
"NYTimes",
"NaSDDNS",
"Naver",
"NaverTV",
"NetEase",
"NetEaseMusic",
"Netflix",
"Niconico",
"Nike",
"Nikkei",
"Nintendo",
"NivodTV",
"Notion",
"NowE",
"Npmjs",
"Nvidia",
"OKX",
"OP",
"OPPO",
"Olevod",
"OneDrive",
"OnePlus",
"OpenAI",
"Opera",
"Oracle",
"Oreilly",
"Origin",
"OuPeng",
"Overcast",
"Overwatch",
"PBS",
"PCCW",
"PChome",
"PChomeTW",
"PPTV",
"PSBC",
"Pandora",
"PandoraTV",
"ParamountPlus",
"Patreon",
"PayPal",
"Peacock",
"Picacg",
"Picsee",
"PikPak",
"Pinduoduo",
"PingAn",
"Pinterest",
"Pixiv",
"Pixnet",
"PlayStation",
"PotatoChat",
"PrimeVideo",
"Privacy",
"PrivateTracker",
"Protonmail",
"Proxy",
"ProxyLite",
"Pubmatic",
"Purikonejp",
"Python",
"QiNiuYun",
"QingCloud",
"Qobuz",
"Qualcomm",
"QuickConnect",
"Qyyjt",
"RTHK",
"Rakuten",
"Rarbg",
"Razer",
"Reabble",
"Reddit",
"Riot",
"Rockstar",
"RuanMei",
"SFExpress",
"SMG",
"SMZDM",
"STUN",
"Salesforce",
"Samsung",
"Scaleflex",
"Scholar",
"Sectigo",
"ShangHaiJuXiao",
"Shanling",
"Sharethis",
"ShenMa",
"ShiJiChaoXing",
"ShiNongZhiKe",
"Shopee",
"Shopify",
"Sina",
"Siri",
"SkyGO",
"Slack",
"SlideShare",
"Sling",
"SmarTone",
"Snap",
"Sohu",
"Sony",
"SouFang",
"SoundCloud",
"SourceForge",
"Spark",
"Speedtest",
"Spotify",
"Stackexchange",
"StarCraftII",
"Starbucks",
"Steam",
"SteamCN",
"Stripe",
"SuNing",
"SublimeText",
"SuiShiChuanMei",
"Supercell",
"Synology",
"SystemOTA",
"TCL",
"TIDAL",
"TVB",
"TVer",
"TaiKang",
"TaiWanGood",
"TaiheMusic",
"TapTap",
"TeamViewer",
"Teambition",
"Teams",
"Telegram",
"TelegramNL",
"TelegramSG",
"TelegramUS",
"Tencent",
"TencentVideo",
"TeraBox",
"Tesla",
"TestFlight",
"ThomsonReuters",
"Threads",
"TianTianKanKan",
"TianWeiChengXin",
"TianYaForum",
"TigerFintech",
"TikTok",
"Tmdb",
"TongCheng",
"TrustWave",
"TruthSocial",
"Tumblr",
"Twitch",
"Twitter",
"U17",
"UBI",
"UC",
"UCloud",
"UKMedia",
"UPYun",
"USMedia",
"Ubisoft",
"Ubuntu",
"Udacity",
"UnionPay",
"Unity",
"VISA",
"VK",
"VOA",
"Vancl",
"Vercel",
"Verisign",
"Verizon",
"VidolTV",
"VikACG",
"Viki",
"Vimeo",
"VipShop",
"ViuTV",
"Vivo",
"Voxmedia",
"W3schools",
"WIX",
"WanKaHuanJu",
"WanMeiShiJie",
"Wanfang",
"WangSuKeJi",
"WangXinKeJi",
"WeChat",
"WeTV",
"WeType",
"WeiZhiYunDong",
"Weibo",
"WenJuanXing",
"Westerndigital",
"Whatsapp",
"WiFiMaster",
"Wikimedia",
"Wikipedia",
"WildRift",
"WoLai",
"Wordpress",
"WorldofWarcraft",
"Wteam",
"Xbox",
"XiamiMusic",
"XianYu",
"XiaoGouKeJi",
"XiaoHongShu",
"XiaoMi",
"XiaoYuanKeJi",
"XieCheng",
"XingKongWuXian",
"XueErSi",
"XueQiu",
"Xunlei",
"YYeTs",
"Yandex",
"YiChe",
"YiXiaKeJi",
"YiZhiBo",
"YouMengChuangXiang",
"YouTube",
"YouTubeMusic",
"YouZan",
"Youku",
"YuanFuDao",
"YunFanJiaSu",
"ZDNS",
"Zalo",
"Zee",
"ZeeTV",
"Zendesk",
"ZhangYue",
"ZhiYinManKe",
"ZhiYunZhong",
"Zhihu",
"ZhihuAds",
"ZhongGuoShiHua",
"ZhongWeiShiJi",
"ZhongXingTongXun",
"ZhongYuanYiShang",
"ZhuanZhuan",
"Zoho",
"aiXcoder",
"eBay",
"friDay",
"iCloud",
"iCloudPrivateRelay",
"iFlytek",
"iQIYI",
"iQIYIIntl",
"iTalkBB",
"ifanr",
"myTVSUPER",
"zhanqi"
], ],
"exclude_categories": [] "mode": "local",
"local_root": "upstream"
}, },
"output": { "output": {
"dir": "dist", "dir": "dist",
+94 -24
View File
@@ -38,6 +38,8 @@ class Config:
repo: str repo: str
ref: str ref: str
token: str | None token: str | None
source_mode: str
local_source_root: str
source_root: str source_root: str
source_filename_pattern: str source_filename_pattern: str
output_dir: str output_dir: str
@@ -150,6 +152,8 @@ def load_config(path: Path) -> Config:
repo=gitea["repo"], repo=gitea["repo"],
ref=gitea.get("ref", "main"), ref=gitea.get("ref", "main"),
token=token, token=token,
source_mode=source.get("mode", "gitea"),
local_source_root=source.get("local_root", "."),
source_root=source.get("root", "rule/Surge"), source_root=source.get("root", "rule/Surge"),
source_filename_pattern=source.get("filename_pattern", "{name}.list"), source_filename_pattern=source.get("filename_pattern", "{name}.list"),
output_dir=output.get("dir", "dist"), output_dir=output.get("dir", "dist"),
@@ -173,6 +177,19 @@ def parse_rules(content: str) -> list[RuleLine]:
continue continue
seen.add(line) seen.add(line)
# Domain-only files (e.g. *_Domain.list) may contain plain host suffixes
# without a rule prefix. Normalize them to DOMAIN-SUFFIX.
if "," not in line:
domain = line.lstrip(".").strip()
if not domain:
continue
normalized = f"DOMAIN-SUFFIX,{domain}"
if normalized in seen:
continue
seen.add(normalized)
rules.append(RuleLine(raw=normalized, rule_type="DOMAIN-SUFFIX"))
continue
parts = [part.strip() for part in line.split(",") if part.strip()] parts = [part.strip() for part in line.split(",") if part.strip()]
if not parts: if not parts:
continue continue
@@ -301,11 +318,39 @@ def should_include_category(name: str, cfg: Config, cli_names: set[str]) -> bool
return True return True
def local_abs_path(cfg: Config, relative_path: str) -> Path:
    """Join a repo-relative path onto the configured local source root.

    The root is user-expanded and resolved to an absolute path before
    the relative path is appended.
    """
    base_dir = Path(cfg.local_source_root).expanduser().resolve()
    return base_dir / relative_path
def list_dir_source(client: GiteaClient, cfg: Config, path: str) -> list[dict[str, Any]]:
    """List directory entries for ``path`` from the active source backend.

    In ``local`` mode, entries come from the filesystem cache and are shaped
    like the Gitea API payload (``{"name": ..., "type": "dir"|"file"}``);
    otherwise the Gitea API is queried directly.
    """
    if cfg.source_mode != "local":
        return client.list_dir(cfg.owner, cfg.repo, path, cfg.ref)

    base = local_abs_path(cfg, path)
    if not base.is_dir():
        raise RuntimeError(f"Local source path is not a directory: {base}")
    return [
        {"name": child.name, "type": "dir" if child.is_dir() else "file"}
        for child in base.iterdir()
    ]
def read_source_file(client: GiteaClient, cfg: Config, path: str) -> str:
    """Return the text of one source file via the active backend.

    Local mode reads UTF-8 (undecodable bytes replaced) from the cache and
    raises ``FileNotFoundError`` when the file is absent; any other mode
    fetches the file through the Gitea API.
    """
    if cfg.source_mode != "local":
        return client.read_file(cfg.owner, cfg.repo, path, cfg.ref)

    local_path = local_abs_path(cfg, path)
    if local_path.is_file():
        return local_path.read_text(encoding="utf-8", errors="replace")
    raise FileNotFoundError(str(local_path))
def find_categories(client: GiteaClient, cfg: Config, cli_names: set[str]) -> list[str]: def find_categories(client: GiteaClient, cfg: Config, cli_names: set[str]) -> list[str]:
if cfg.include_categories: if cfg.include_categories:
return sorted([n for n in cfg.include_categories if should_include_category(n, cfg, cli_names)]) return sorted([n for n in cfg.include_categories if should_include_category(n, cfg, cli_names)])
entries = client.list_dir(cfg.owner, cfg.repo, cfg.source_root, cfg.ref) entries = list_dir_source(client, cfg, cfg.source_root)
categories: list[str] = [] categories: list[str] = []
for entry in entries: for entry in entries:
@@ -327,37 +372,62 @@ def find_categories(client: GiteaClient, cfg: Config, cli_names: set[str]) -> li
return sorted(categories) return sorted(categories)
def read_file_optional(client: GiteaClient, cfg: Config, candidate_paths: list[str]) -> tuple[str | None, str | None]:
    """Return ``(path, content)`` for the first readable candidate.

    Every read failure is treated as "candidate not available" so callers
    can probe an ordered list of possible locations; ``(None, None)`` is
    returned when no candidate could be read.
    """
    for candidate in candidate_paths:
        try:
            content = read_source_file(client, cfg, candidate)
        except Exception:
            continue
        return candidate, content
    return None, None
def build_one_category(client: GiteaClient, cfg: Config, name: str, base_out: Path) -> tuple[int, int, int, int]: def build_one_category(client: GiteaClient, cfg: Config, name: str, base_out: Path) -> tuple[int, int, int, int]:
filename_base = cfg.source_filename_pattern.format(name=name) filename_base = cfg.source_filename_pattern.format(name=name)
candidate_filenames = [ # Preferred merge model:
# 1) <Name>.list (keyword/ua/ip with no-resolve)
# 2) <Name>_Domain.list (domain rules)
# 3) <Name>_Resolve.list (keyword/ua/ip without no-resolve)
# Merge then dedupe.
merge_filenames = [
filename_base, filename_base,
f"{name}_All.list",
f"{name}_Domain.list", f"{name}_Domain.list",
f"{name}_Resolve.list", f"{name}_Resolve.list",
] ]
candidate_paths: list[str] = [] merged_chunks: list[str] = []
for fn in candidate_filenames: merged_sources: list[str] = []
candidate_paths.append(f"{cfg.source_root}/{name}/{fn}") # nested for fn in merge_filenames:
candidate_paths.append(f"{cfg.source_root}/{fn}") # flat nested = f"{cfg.source_root}/{name}/{fn}"
flat = f"{cfg.source_root}/{fn}"
src_path, src_content = read_file_optional(client, cfg, [nested, flat])
if src_path and src_content is not None:
merged_sources.append(src_path)
merged_chunks.append(src_content)
source_rel_path = "" if merged_chunks:
source_content = "" source_rel_path = " + ".join(merged_sources)
last_error: Exception | None = None rules = parse_rules("\n".join(merged_chunks))
for path in candidate_paths: else:
try: # Fallback for categories that only provide *_All.list or other variants.
source_content = client.read_file(cfg.owner, cfg.repo, path, cfg.ref) fallback_filenames = [
source_rel_path = path f"{name}_All.list",
break f"{name}_Domain.list",
except Exception as exc: f"{name}_Resolve.list",
last_error = exc filename_base,
]
if not source_rel_path: source_rel_path = ""
if last_error is not None: source_content = ""
raise last_error for fn in fallback_filenames:
raise RuntimeError(f"unable to locate source list for category: {name}") nested = f"{cfg.source_root}/{name}/{fn}"
flat = f"{cfg.source_root}/{fn}"
rules = parse_rules(source_content) src_path, src_content = read_file_optional(client, cfg, [nested, flat])
if src_path and src_content is not None:
source_rel_path = src_path
source_content = src_content
break
if not source_rel_path:
raise RuntimeError(f"unable to locate source list for category: {name}")
rules = parse_rules(source_content)
surge_out = base_out / "surge" / f"{name}.list" surge_out = base_out / "surge" / f"{name}.list"
loon_out = base_out / "loon" / f"{name}.list" loon_out = base_out / "loon" / f"{name}.list"
+36
View File
@@ -0,0 +1,36 @@
#!/usr/bin/env bash
#
# Sync the upstream ios_rule_script Surge rule tree into ./upstream so the
# merge/convert pipeline can run entirely from a local cache.
#
# Environment overrides:
#   UPSTREAM_REPO_URL  git URL of the upstream repo
#                      (default: https://github.com/blackmatrix7/ios_rule_script.git)
#   UPSTREAM_REF       branch or tag to clone (default: master)
set -euo pipefail

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
TMP_DIR="$(mktemp -d)"
UPSTREAM_REPO_URL="${UPSTREAM_REPO_URL:-https://github.com/blackmatrix7/ios_rule_script.git}"
UPSTREAM_REF="${UPSTREAM_REF:-master}"
TARGET_DIR="${ROOT_DIR}/upstream"

cleanup() {
  # ${TMP_DIR:?} aborts instead of running `rm -rf` on an empty path.
  rm -rf -- "${TMP_DIR:?}"
}
trap cleanup EXIT

echo "[sync] clone upstream: $UPSTREAM_REPO_URL@$UPSTREAM_REF"
# --quiet keeps progress noise down while still surfacing real errors on
# stderr; the previous `>/dev/null 2>&1` hid every clone failure message.
git clone --quiet --depth=1 --branch "$UPSTREAM_REF" "$UPSTREAM_REPO_URL" "$TMP_DIR/upstream"

# Fail fast if the upstream layout ever changes and the Surge tree is gone.
if [[ ! -d "$TMP_DIR/upstream/rule/Surge" ]]; then
  echo "[sync] error: rule/Surge not found in upstream checkout" >&2
  exit 1
fi

mkdir -p "$TARGET_DIR/rule"
rm -rf -- "${TARGET_DIR:?}/rule/Surge"
cp -R "$TMP_DIR/upstream/rule/Surge" "$TARGET_DIR/rule/Surge"

UPSTREAM_COMMIT="$(git -C "$TMP_DIR/upstream" rev-parse HEAD)"
UPSTREAM_DATE="$(git -C "$TMP_DIR/upstream" show -s --date=iso --format=%cd HEAD)"

# Record provenance so the cache is auditable without its own git metadata.
cat > "$TARGET_DIR/README.md" <<TXT
# Local Upstream Cache

This directory is generated by scripts/sync_surge_full.sh.

- Source: $UPSTREAM_REPO_URL
- Ref: $UPSTREAM_REF
- Commit: $UPSTREAM_COMMIT
- Commit Date: $UPSTREAM_DATE
TXT

echo "[sync] done: commit=$UPSTREAM_COMMIT"