Mirror of https://github.com/SagerNet/sing-box.git (synced 2025-06-13 21:54:13 +08:00)

Compare commits: dev-next...v1.8.0-alp (11 commits)
- 292d63d4c9
- 6fbcac301f
- b5675e1350
- 53c270cd3f
- b78a74e7b7
- 8d58e88105
- aa72c6ee6f
- 5d225457ef
- 4ae9b09d77
- 6c6af936d0
- 1bf48d792e
.gitignore (vendored): 1 line changed

```diff
@@ -1,6 +1,7 @@
 /.idea/
 /vendor/
 /*.json
+/*.srs
 /*.db
 /site/
 /bin/
```
```diff
@@ -1,11 +1,16 @@
 package adapter

 import (
 	"bytes"
 	"context"
 	"encoding/binary"
 	"io"
 	"net"
 	"time"

 	"github.com/sagernet/sing-box/common/urltest"
 	N "github.com/sagernet/sing/common/network"
 	"github.com/sagernet/sing/common/rw"
 )

 type ClashServer interface {
@@ -13,22 +18,83 @@ type ClashServer interface {
 	PreStarter
 	Mode() string
 	ModeList() []string
 	StoreSelected() bool
 	StoreFakeIP() bool
 	CacheFile() ClashCacheFile
 	HistoryStorage() *urltest.HistoryStorage
 	RoutedConnection(ctx context.Context, conn net.Conn, metadata InboundContext, matchedRule Rule) (net.Conn, Tracker)
 	RoutedPacketConnection(ctx context.Context, conn N.PacketConn, metadata InboundContext, matchedRule Rule) (N.PacketConn, Tracker)
 }

-type ClashCacheFile interface {
+type CacheFile interface {
 	Service
 	PreStarter

 	StoreFakeIP() bool
 	FakeIPStorage

 	LoadMode() string
 	StoreMode(mode string) error
 	LoadSelected(group string) string
 	StoreSelected(group string, selected string) error
 	LoadGroupExpand(group string) (isExpand bool, loaded bool)
 	StoreGroupExpand(group string, expand bool) error
 	FakeIPStorage
 	LoadRuleSet(tag string) *SavedRuleSet
 	SaveRuleSet(tag string, set *SavedRuleSet) error
 }

 type SavedRuleSet struct {
 	Content []byte
 	LastUpdated time.Time
 	LastEtag string
 }

 func (s *SavedRuleSet) MarshalBinary() ([]byte, error) {
 	var buffer bytes.Buffer
 	err := binary.Write(&buffer, binary.BigEndian, uint8(1))
 	if err != nil {
 		return nil, err
 	}
 	err = rw.WriteUVariant(&buffer, uint64(len(s.Content)))
 	if err != nil {
 		return nil, err
 	}
 	buffer.Write(s.Content)
 	err = binary.Write(&buffer, binary.BigEndian, s.LastUpdated.Unix())
 	if err != nil {
 		return nil, err
 	}
 	err = rw.WriteVString(&buffer, s.LastEtag)
 	if err != nil {
 		return nil, err
 	}
 	return buffer.Bytes(), nil
 }

 func (s *SavedRuleSet) UnmarshalBinary(data []byte) error {
 	reader := bytes.NewReader(data)
 	var version uint8
 	err := binary.Read(reader, binary.BigEndian, &version)
 	if err != nil {
 		return err
 	}
 	contentLen, err := rw.ReadUVariant(reader)
 	if err != nil {
 		return err
 	}
 	s.Content = make([]byte, contentLen)
 	_, err = io.ReadFull(reader, s.Content)
 	if err != nil {
 		return err
 	}
 	var lastUpdated int64
 	err = binary.Read(reader, binary.BigEndian, &lastUpdated)
 	if err != nil {
 		return err
 	}
 	s.LastUpdated = time.Unix(lastUpdated, 0)
 	s.LastEtag, err = rw.ReadVString(reader)
 	if err != nil {
 		return err
 	}
 	return nil
 }

 type Tracker interface {
```
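A minimal sketch of how the new SavedRuleSet binary encoding could be exercised on its own: the struct and its MarshalBinary/UnmarshalBinary methods come from the hunk above, while the surrounding program and the sample values are illustrative assumptions.

```go
package main

import (
	"fmt"
	"time"

	"github.com/sagernet/sing-box/adapter"
)

func main() {
	saved := adapter.SavedRuleSet{
		Content:     []byte(`{"version":1,"rules":[]}`), // cached rule-set body (example)
		LastUpdated: time.Now(),
		LastEtag:    `"abc123"`, // example ETag value
	}
	// Encoding layout per MarshalBinary above: version byte, uvarint length,
	// content bytes, unix timestamp, then the ETag string.
	data, err := saved.MarshalBinary()
	if err != nil {
		panic(err)
	}
	var restored adapter.SavedRuleSet
	if err := restored.UnmarshalBinary(data); err != nil {
		panic(err)
	}
	fmt.Println(restored.LastEtag, len(restored.Content))
}
```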
```diff
@@ -46,11 +46,24 @@ type InboundContext struct {
 	SourceGeoIPCode string
 	GeoIPCode string
 	ProcessInfo *process.Info
 	QueryType uint16
 	FakeIP bool

 	// dns cache
 	// rule cache

 	QueryType uint16
 	IPCIDRMatchSource bool
 	SourceAddressMatch bool
 	SourcePortMatch bool
 	DestinationAddressMatch bool
 	DestinationPortMatch bool
 }

 func (c *InboundContext) ResetRuleCache() {
 	c.IPCIDRMatchSource = false
 	c.SourceAddressMatch = false
 	c.SourcePortMatch = false
 	c.DestinationAddressMatch = false
 	c.DestinationPortMatch = false
 }

 type inboundContextKey struct{}
```
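An illustrative sketch (not from the diff) of how a matching loop might use the new per-rule cache fields: ResetRuleCache, Rule.Match, and Rule.Outbound appear in this changeset, but the loop itself and its placement are assumptions.

```go
package example

import "github.com/sagernet/sing-box/adapter"

// matchOutbound clears the cached sub-match results before evaluating each
// rule, so individual item matchers can memoize source/destination checks
// within a single rule evaluation only.
func matchOutbound(rules []adapter.Rule, metadata *adapter.InboundContext) (string, bool) {
	for _, rule := range rules {
		metadata.ResetRuleCache() // resets IPCIDRMatchSource, SourceAddressMatch, ...
		if rule.Match(metadata) {
			return rule.Outbound(), true
		}
	}
	return "", false
}
```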
```diff
@@ -2,12 +2,14 @@ package adapter

 import (
 	"context"
 	"net/http"
 	"net/netip"

 	"github.com/sagernet/sing-box/common/geoip"
 	"github.com/sagernet/sing-dns"
 	"github.com/sagernet/sing-tun"
 	"github.com/sagernet/sing/common/control"
 	N "github.com/sagernet/sing/common/network"
 	"github.com/sagernet/sing/service"

 	mdns "github.com/miekg/dns"
@@ -15,10 +17,11 @@ import (

 type Router interface {
 	Service
 	PostStarter

 	Outbounds() []Outbound
 	Outbound(tag string) (Outbound, bool)
-	DefaultOutbound(network string) Outbound
+	DefaultOutbound(network string) (Outbound, error)

 	FakeIPStore() FakeIPStore

@@ -27,6 +30,8 @@ type Router interface {
 	GeoIPReader() *geoip.Reader
 	LoadGeosite(code string) (Rule, error)

 	RuleSet(tag string) (RuleSet, bool)

 	Exchange(ctx context.Context, message *mdns.Msg) (*mdns.Msg, error)
 	Lookup(ctx context.Context, domain string, strategy dns.DomainStrategy) ([]netip.Addr, error)
 	LookupDefault(ctx context.Context, domain string) ([]netip.Addr, error)
@@ -61,11 +66,15 @@ func RouterFromContext(ctx context.Context) Router {
 	return service.FromContext[Router](ctx)
 }

 type HeadlessRule interface {
 	Match(metadata *InboundContext) bool
 }

 type Rule interface {
 	HeadlessRule
 	Service
 	Type() string
 	UpdateGeosite() error
 	Match(metadata *InboundContext) bool
 	Outbound() string
 	String() string
 }
@@ -76,6 +85,18 @@ type DNSRule interface {
 	RewriteTTL() *uint32
 }

 type RuleSet interface {
 	StartContext(ctx context.Context, startContext RuleSetStartContext) error
 	PostStart() error
 	Close() error
 	HeadlessRule
 }

 type RuleSetStartContext interface {
 	HTTPClient(detour string, dialer N.Dialer) *http.Client
 	Close()
 }

 type InterfaceUpdateListener interface {
 	InterfaceUpdated()
 }
```
box.go: 57 lines changed

```diff
@@ -10,6 +10,7 @@ import (

 	"github.com/sagernet/sing-box/adapter"
 	"github.com/sagernet/sing-box/experimental"
 	"github.com/sagernet/sing-box/experimental/cachefile"
 	"github.com/sagernet/sing-box/experimental/libbox/platform"
 	"github.com/sagernet/sing-box/inbound"
 	"github.com/sagernet/sing-box/log"
@@ -32,7 +33,8 @@ type Box struct {
 	outbounds []adapter.Outbound
 	logFactory log.Factory
 	logger log.ContextLogger
-	preServices map[string]adapter.Service
+	preServices1 map[string]adapter.Service
+	preServices2 map[string]adapter.Service
 	postServices map[string]adapter.Service
 	done chan struct{}
 }
@@ -45,17 +47,21 @@ type Options struct {
 }

 func New(options Options) (*Box, error) {
-	createdAt := time.Now()
 	ctx := options.Context
 	if ctx == nil {
 		ctx = context.Background()
 	}
 	ctx = service.ContextWithDefaultRegistry(ctx)
 	ctx = pause.ContextWithDefaultManager(ctx)
+	createdAt := time.Now()
 	experimentalOptions := common.PtrValueOrDefault(options.Experimental)
 	applyDebugOptions(common.PtrValueOrDefault(experimentalOptions.Debug))
 	var needCacheFile bool
 	var needClashAPI bool
 	var needV2RayAPI bool
 	if experimentalOptions.CacheFile != nil && experimentalOptions.CacheFile.Enabled || options.PlatformLogWriter != nil {
 		needCacheFile = true
 	}
 	if experimentalOptions.ClashAPI != nil || options.PlatformLogWriter != nil {
 		needClashAPI = true
 	}
@@ -145,8 +151,14 @@ func New(options Options) (*Box, error) {
 			return nil, E.Cause(err, "initialize platform interface")
 		}
 	}
-	preServices := make(map[string]adapter.Service)
+	preServices1 := make(map[string]adapter.Service)
+	preServices2 := make(map[string]adapter.Service)
 	postServices := make(map[string]adapter.Service)
 	if needCacheFile {
 		cacheFile := cachefile.NewCacheFile(ctx, common.PtrValueOrDefault(experimentalOptions.CacheFile))
 		preServices1["cache file"] = cacheFile
 		service.MustRegister[adapter.CacheFile](ctx, cacheFile)
 	}
 	if needClashAPI {
 		clashAPIOptions := common.PtrValueOrDefault(experimentalOptions.ClashAPI)
 		clashAPIOptions.ModeList = experimental.CalculateClashModeList(options.Options)
@@ -155,7 +167,7 @@ func New(options Options) (*Box, error) {
 			return nil, E.Cause(err, "create clash api server")
 		}
 		router.SetClashServer(clashServer)
-		preServices["clash api"] = clashServer
+		preServices2["clash api"] = clashServer
 	}
 	if needV2RayAPI {
 		v2rayServer, err := experimental.NewV2RayServer(logFactory.NewLogger("v2ray-api"), common.PtrValueOrDefault(experimentalOptions.V2RayAPI))
@@ -163,7 +175,7 @@ func New(options Options) (*Box, error) {
 			return nil, E.Cause(err, "create v2ray api server")
 		}
 		router.SetV2RayServer(v2rayServer)
-		preServices["v2ray api"] = v2rayServer
+		preServices2["v2ray api"] = v2rayServer
 	}
 	return &Box{
 		router: router,
@@ -172,7 +184,8 @@ func New(options Options) (*Box, error) {
 		createdAt: createdAt,
 		logFactory: logFactory,
 		logger: logFactory.Logger(),
-		preServices: preServices,
+		preServices1: preServices1,
+		preServices2: preServices2,
 		postServices: postServices,
 		done: make(chan struct{}),
 	}, nil
@@ -217,7 +230,16 @@ func (s *Box) Start() error {
 }

 func (s *Box) preStart() error {
-	for serviceName, service := range s.preServices {
+	for serviceName, service := range s.preServices1 {
 		if preService, isPreService := service.(adapter.PreStarter); isPreService {
 			s.logger.Trace("pre-start ", serviceName)
 			err := preService.PreStart()
 			if err != nil {
 				return E.Cause(err, "pre-starting ", serviceName)
 			}
 		}
 	}
 	for serviceName, service := range s.preServices2 {
 		if preService, isPreService := service.(adapter.PreStarter); isPreService {
 			s.logger.Trace("pre-start ", serviceName)
 			err := preService.PreStart()
@@ -238,7 +260,14 @@ func (s *Box) start() error {
 	if err != nil {
 		return err
 	}
-	for serviceName, service := range s.preServices {
+	for serviceName, service := range s.preServices1 {
 		s.logger.Trace("starting ", serviceName)
 		err = service.Start()
 		if err != nil {
 			return E.Cause(err, "start ", serviceName)
 		}
 	}
 	for serviceName, service := range s.preServices2 {
 		s.logger.Trace("starting ", serviceName)
 		err = service.Start()
 		if err != nil {
@@ -278,6 +307,10 @@ func (s *Box) postStart() error {
 			}
 		}
 	}
 	err := s.router.PostStart()
 	if err != nil {
 		return E.Cause(err, "post-start router")
 	}
 	return nil
 }

@@ -313,7 +346,13 @@ func (s *Box) Close() error {
 			return E.Cause(err, "close router")
 		})
 	}
-	for serviceName, service := range s.preServices {
+	for serviceName, service := range s.preServices1 {
 		s.logger.Trace("closing ", serviceName)
 		errors = E.Append(errors, service.Close(), func(err error) error {
 			return E.Cause(err, "close ", serviceName)
 		})
 	}
 	for serviceName, service := range s.preServices2 {
 		s.logger.Trace("closing ", serviceName)
 		errors = E.Append(errors, service.Close(), func(err error) error {
 			return E.Cause(err, "close ", serviceName)
```
```diff
@@ -7,7 +7,6 @@ import (

 	"github.com/sagernet/sing-box/common/json"
 	"github.com/sagernet/sing-box/log"
 	"github.com/sagernet/sing-box/option"
 	E "github.com/sagernet/sing/common/exceptions"

 	"github.com/spf13/cobra"
@@ -69,41 +68,3 @@ func format() error {
 	}
 	return nil
 }

 func formatOne(configPath string) error {
 	configContent, err := os.ReadFile(configPath)
 	if err != nil {
 		return E.Cause(err, "read config")
 	}
 	var options option.Options
 	err = options.UnmarshalJSON(configContent)
 	if err != nil {
 		return E.Cause(err, "decode config")
 	}
 	buffer := new(bytes.Buffer)
 	encoder := json.NewEncoder(buffer)
 	encoder.SetIndent("", " ")
 	err = encoder.Encode(options)
 	if err != nil {
 		return E.Cause(err, "encode config")
 	}
 	if !commandFormatFlagWrite {
 		os.Stdout.WriteString(buffer.String() + "\n")
 		return nil
 	}
 	if bytes.Equal(configContent, buffer.Bytes()) {
 		return nil
 	}
 	output, err := os.Create(configPath)
 	if err != nil {
 		return E.Cause(err, "open output")
 	}
 	_, err = output.Write(buffer.Bytes())
 	output.Close()
 	if err != nil {
 		return E.Cause(err, "write output")
 	}
 	outputPath, _ := filepath.Abs(configPath)
 	os.Stderr.WriteString(outputPath + "\n")
 	return nil
 }
```
cmd/sing-box/cmd_geoip.go (new file, 43 lines)

```go
package main

import (
	"github.com/sagernet/sing-box/log"
	E "github.com/sagernet/sing/common/exceptions"

	"github.com/oschwald/maxminddb-golang"
	"github.com/spf13/cobra"
)

var (
	geoipReader *maxminddb.Reader
	commandGeoIPFlagFile string
)

var commandGeoip = &cobra.Command{
	Use: "geoip",
	Short: "GeoIP tools",
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		err := geoipPreRun()
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoip.PersistentFlags().StringVarP(&commandGeoIPFlagFile, "file", "f", "geoip.db", "geoip file")
	mainCommand.AddCommand(commandGeoip)
}

func geoipPreRun() error {
	reader, err := maxminddb.Open(commandGeoIPFlagFile)
	if err != nil {
		return err
	}
	if reader.Metadata.DatabaseType != "sing-geoip" {
		reader.Close()
		return E.New("incorrect database type, expected sing-geoip, got ", reader.Metadata.DatabaseType)
	}
	geoipReader = reader
	return nil
}
```
cmd/sing-box/cmd_geoip_export.go (new file, 98 lines)

```go
package main

import (
	"io"
	"net"
	"os"
	"strings"

	"github.com/sagernet/sing-box/common/json"
	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing-box/log"
	"github.com/sagernet/sing-box/option"
	E "github.com/sagernet/sing/common/exceptions"

	"github.com/oschwald/maxminddb-golang"
	"github.com/spf13/cobra"
)

var flagGeoipExportOutput string

const flagGeoipExportDefaultOutput = "geoip-<country>.srs"

var commandGeoipExport = &cobra.Command{
	Use: "export <country>",
	Short: "Export geoip country as rule-set",
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		err := geoipExport(args[0])
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoipExport.Flags().StringVarP(&flagGeoipExportOutput, "output", "o", flagGeoipExportDefaultOutput, "Output path")
	commandGeoip.AddCommand(commandGeoipExport)
}

func geoipExport(countryCode string) error {
	networks := geoipReader.Networks(maxminddb.SkipAliasedNetworks)
	countryMap := make(map[string][]*net.IPNet)
	var (
		ipNet *net.IPNet
		nextCountryCode string
		err error
	)
	for networks.Next() {
		ipNet, err = networks.Network(&nextCountryCode)
		if err != nil {
			return err
		}
		countryMap[nextCountryCode] = append(countryMap[nextCountryCode], ipNet)
	}
	ipNets := countryMap[strings.ToLower(countryCode)]
	if len(ipNets) == 0 {
		return E.New("country code not found: ", countryCode)
	}

	var (
		outputFile *os.File
		outputWriter io.Writer
	)
	if flagGeoipExportOutput == "stdout" {
		outputWriter = os.Stdout
	} else if flagGeoipExportOutput == flagGeoipExportDefaultOutput {
		outputFile, err = os.Create("geoip-" + countryCode + ".json")
		if err != nil {
			return err
		}
		defer outputFile.Close()
		outputWriter = outputFile
	} else {
		outputFile, err = os.Create(flagGeoipExportOutput)
		if err != nil {
			return err
		}
		defer outputFile.Close()
		outputWriter = outputFile
	}

	encoder := json.NewEncoder(outputWriter)
	encoder.SetIndent("", " ")
	var headlessRule option.DefaultHeadlessRule
	headlessRule.IPCIDR = make([]string, 0, len(ipNets))
	for _, cidr := range ipNets {
		headlessRule.IPCIDR = append(headlessRule.IPCIDR, cidr.String())
	}
	var plainRuleSet option.PlainRuleSetCompat
	plainRuleSet.Version = C.RuleSetVersion1
	plainRuleSet.Options.Rules = []option.HeadlessRule{
		{
			Type: C.RuleTypeDefault,
			DefaultOptions: headlessRule,
		},
	}
	return encoder.Encode(plainRuleSet)
}
```
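For reference, a sketch of the kind of document this export writes: a headless rule-set source with version 1 and a single default rule holding CIDR entries. The field names are taken from the option structs used above and from the documented rule-set source format; the concrete CIDR values are illustrative only.

```go
package main

// Illustrative only: roughly what `sing-box geoip export <country>` produces,
// which `sing-box rule-set compile` can then turn into a binary .srs file.
const exampleGeoIPRuleSet = `{
  "version": 1,
  "rules": [
    {
      "ip_cidr": [
        "203.0.113.0/24",
        "198.51.100.0/24"
      ]
    }
  ]
}`
```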
cmd/sing-box/cmd_geoip_list.go (new file, 31 lines)

```go
package main

import (
	"os"

	"github.com/sagernet/sing-box/log"

	"github.com/spf13/cobra"
)

var commandGeoipList = &cobra.Command{
	Use: "list",
	Short: "List geoip country codes",
	Run: func(cmd *cobra.Command, args []string) {
		err := listGeoip()
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoip.AddCommand(commandGeoipList)
}

func listGeoip() error {
	for _, code := range geoipReader.Metadata.Languages {
		os.Stdout.WriteString(code + "\n")
	}
	return nil
}
```
cmd/sing-box/cmd_geoip_lookup.go (new file, 47 lines)

```go
package main

import (
	"net/netip"
	"os"

	"github.com/sagernet/sing-box/log"
	E "github.com/sagernet/sing/common/exceptions"
	N "github.com/sagernet/sing/common/network"

	"github.com/spf13/cobra"
)

var commandGeoipLookup = &cobra.Command{
	Use: "lookup <address>",
	Short: "Lookup if an IP address is contained in the GeoIP database",
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		err := geoipLookup(args[0])
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoip.AddCommand(commandGeoipLookup)
}

func geoipLookup(address string) error {
	addr, err := netip.ParseAddr(address)
	if err != nil {
		return E.Cause(err, "parse address")
	}
	if !N.IsPublicAddr(addr) {
		os.Stdout.WriteString("private\n")
		return nil
	}
	var code string
	_ = geoipReader.Lookup(addr.AsSlice(), &code)
	if code != "" {
		os.Stdout.WriteString(code + "\n")
		return nil
	}
	os.Stdout.WriteString("unknown\n")
	return nil
}
```
cmd/sing-box/cmd_geosite.go (new file, 41 lines)

```go
package main

import (
	"github.com/sagernet/sing-box/common/geosite"
	"github.com/sagernet/sing-box/log"
	E "github.com/sagernet/sing/common/exceptions"

	"github.com/spf13/cobra"
)

var (
	commandGeoSiteFlagFile string
	geositeReader *geosite.Reader
	geositeCodeList []string
)

var commandGeoSite = &cobra.Command{
	Use: "geosite",
	Short: "Geosite tools",
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		err := geositePreRun()
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoSite.PersistentFlags().StringVarP(&commandGeoSiteFlagFile, "file", "f", "geosite.db", "geosite file")
	mainCommand.AddCommand(commandGeoSite)
}

func geositePreRun() error {
	reader, codeList, err := geosite.Open(commandGeoSiteFlagFile)
	if err != nil {
		return E.Cause(err, "open geosite file")
	}
	geositeReader = reader
	geositeCodeList = codeList
	return nil
}
```
cmd/sing-box/cmd_geosite_export.go (new file, 81 lines)

```go
package main

import (
	"io"
	"os"

	"github.com/sagernet/sing-box/common/geosite"
	"github.com/sagernet/sing-box/common/json"
	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing-box/log"
	"github.com/sagernet/sing-box/option"

	"github.com/spf13/cobra"
)

var commandGeositeExportOutput string

const commandGeositeExportDefaultOutput = "geosite-<category>.json"

var commandGeositeExport = &cobra.Command{
	Use: "export <category>",
	Short: "Export geosite category as rule-set",
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		err := geositeExport(args[0])
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeositeExport.Flags().StringVarP(&commandGeositeExportOutput, "output", "o", commandGeositeExportDefaultOutput, "Output path")
	commandGeoSite.AddCommand(commandGeositeExport)
}

func geositeExport(category string) error {
	sourceSet, err := geositeReader.Read(category)
	if err != nil {
		return err
	}
	var (
		outputFile *os.File
		outputWriter io.Writer
	)
	if commandGeositeExportOutput == "stdout" {
		outputWriter = os.Stdout
	} else if commandGeositeExportOutput == commandGeositeExportDefaultOutput {
		outputFile, err = os.Create("geosite-" + category + ".json")
		if err != nil {
			return err
		}
		defer outputFile.Close()
		outputWriter = outputFile
	} else {
		outputFile, err = os.Create(commandGeositeExportOutput)
		if err != nil {
			return err
		}
		defer outputFile.Close()
		outputWriter = outputFile
	}

	encoder := json.NewEncoder(outputWriter)
	encoder.SetIndent("", " ")
	var headlessRule option.DefaultHeadlessRule
	defaultRule := geosite.Compile(sourceSet)
	headlessRule.Domain = defaultRule.Domain
	headlessRule.DomainSuffix = defaultRule.DomainSuffix
	headlessRule.DomainKeyword = defaultRule.DomainKeyword
	headlessRule.DomainRegex = defaultRule.DomainRegex
	var plainRuleSet option.PlainRuleSetCompat
	plainRuleSet.Version = C.RuleSetVersion1
	plainRuleSet.Options.Rules = []option.HeadlessRule{
		{
			Type: C.RuleTypeDefault,
			DefaultOptions: headlessRule,
		},
	}
	return encoder.Encode(plainRuleSet)
}
```
cmd/sing-box/cmd_geosite_list.go (new file, 50 lines)

```go
package main

import (
	"os"
	"sort"

	"github.com/sagernet/sing-box/log"
	F "github.com/sagernet/sing/common/format"

	"github.com/spf13/cobra"
)

var commandGeositeList = &cobra.Command{
	Use: "list <category>",
	Short: "List geosite categories",
	Run: func(cmd *cobra.Command, args []string) {
		err := geositeList()
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoSite.AddCommand(commandGeositeList)
}

func geositeList() error {
	var geositeEntry []struct {
		category string
		items int
	}
	for _, category := range geositeCodeList {
		sourceSet, err := geositeReader.Read(category)
		if err != nil {
			return err
		}
		geositeEntry = append(geositeEntry, struct {
			category string
			items int
		}{category, len(sourceSet)})
	}
	sort.SliceStable(geositeEntry, func(i, j int) bool {
		return geositeEntry[i].items < geositeEntry[j].items
	})
	for _, entry := range geositeEntry {
		os.Stdout.WriteString(F.ToString(entry.category, " (", entry.items, ")\n"))
	}
	return nil
}
```
cmd/sing-box/cmd_geosite_lookup.go (new file, 97 lines)

```go
package main

import (
	"os"
	"sort"

	"github.com/sagernet/sing-box/log"
	E "github.com/sagernet/sing/common/exceptions"

	"github.com/spf13/cobra"
)

var commandGeositeLookup = &cobra.Command{
	Use: "lookup [category] <domain>",
	Short: "Check if a domain is in the geosite",
	Args: cobra.RangeArgs(1, 2),
	Run: func(cmd *cobra.Command, args []string) {
		var (
			source string
			target string
		)
		switch len(args) {
		case 1:
			target = args[0]
		case 2:
			source = args[0]
			target = args[1]
		}
		err := geositeLookup(source, target)
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoSite.AddCommand(commandGeositeLookup)
}

func geositeLookup(source string, target string) error {
	var sourceMatcherList []struct {
		code string
		matcher *searchGeositeMatcher
	}
	if source != "" {
		sourceSet, err := geositeReader.Read(source)
		if err != nil {
			return err
		}
		sourceMatcher, err := newSearchGeositeMatcher(sourceSet)
		if err != nil {
			return E.Cause(err, "compile code: "+source)
		}
		sourceMatcherList = []struct {
			code string
			matcher *searchGeositeMatcher
		}{
			{
				code: source,
				matcher: sourceMatcher,
			},
		}

	} else {
		for _, code := range geositeCodeList {
			sourceSet, err := geositeReader.Read(code)
			if err != nil {
				return err
			}
			sourceMatcher, err := newSearchGeositeMatcher(sourceSet)
			if err != nil {
				return E.Cause(err, "compile code: "+code)
			}
			sourceMatcherList = append(sourceMatcherList, struct {
				code string
				matcher *searchGeositeMatcher
			}{
				code: code,
				matcher: sourceMatcher,
			})
		}
	}
	sort.SliceStable(sourceMatcherList, func(i, j int) bool {
		return sourceMatcherList[i].code < sourceMatcherList[j].code
	})

	for _, matcherItem := range sourceMatcherList {
		if matchRule := matcherItem.matcher.Match(target); matchRule != "" {
			os.Stdout.WriteString("Match code (")
			os.Stdout.WriteString(matcherItem.code)
			os.Stdout.WriteString(") ")
			os.Stdout.WriteString(matchRule)
			os.Stdout.WriteString("\n")
		}
	}
	return nil
}
```
cmd/sing-box/cmd_geosite_matcher.go (new file, 56 lines)

```go
package main

import (
	"regexp"
	"strings"

	"github.com/sagernet/sing-box/common/geosite"
)

type searchGeositeMatcher struct {
	domainMap map[string]bool
	suffixList []string
	keywordList []string
	regexList []string
}

func newSearchGeositeMatcher(items []geosite.Item) (*searchGeositeMatcher, error) {
	options := geosite.Compile(items)
	domainMap := make(map[string]bool)
	for _, domain := range options.Domain {
		domainMap[domain] = true
	}
	rule := &searchGeositeMatcher{
		domainMap: domainMap,
		suffixList: options.DomainSuffix,
		keywordList: options.DomainKeyword,
		regexList: options.DomainRegex,
	}
	return rule, nil
}

func (r *searchGeositeMatcher) Match(domain string) string {
	if r.domainMap[domain] {
		return "domain=" + domain
	}
	for _, suffix := range r.suffixList {
		if strings.HasSuffix(domain, suffix) {
			return "domain_suffix=" + suffix
		}
	}
	for _, keyword := range r.keywordList {
		if strings.Contains(domain, keyword) {
			return "domain_keyword=" + keyword
		}
	}
	for _, regexStr := range r.regexList {
		regex, err := regexp.Compile(regexStr)
		if err != nil {
			continue
		}
		if regex.MatchString(domain) {
			return "domain_regex=" + regexStr
		}
	}
	return ""
}
```
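A brief illustrative sketch (same package, not part of the diff) of the strings the matcher above returns for each kind of hit; the domains are placeholders.

```go
package main

// exampleMatches shows the return values geositeLookup prints after
// "Match code (<code>) ": exact domain, suffix, keyword, or regex hits,
// and the empty string when nothing matches.
func exampleMatches(matcher *searchGeositeMatcher) []string {
	return []string{
		matcher.Match("example.com"),     // "domain=example.com" if listed exactly
		matcher.Match("www.example.com"), // e.g. "domain_suffix=.example.com" on a suffix hit
		matcher.Match("unlisted.test"),   // "" when no rule matches
	}
}
```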
```diff
@@ -18,7 +18,7 @@ import (
 )

 var commandMerge = &cobra.Command{
-	Use: "merge [output]",
+	Use: "merge <output>",
 	Short: "Merge configurations",
 	Run: func(cmd *cobra.Command, args []string) {
 		err := merge(args[0])
```
cmd/sing-box/cmd_rule_set.go (new file, 14 lines)

```go
package main

import (
	"github.com/spf13/cobra"
)

var commandRuleSet = &cobra.Command{
	Use: "rule-set",
	Short: "Manage rule sets",
}

func init() {
	mainCommand.AddCommand(commandRuleSet)
}
```
cmd/sing-box/cmd_rule_set_compile.go (new file, 80 lines)

```go
package main

import (
	"io"
	"os"
	"strings"

	"github.com/sagernet/sing-box/common/json"
	"github.com/sagernet/sing-box/common/srs"
	"github.com/sagernet/sing-box/log"
	"github.com/sagernet/sing-box/option"

	"github.com/spf13/cobra"
)

var flagRuleSetCompileOutput string

const flagRuleSetCompileDefaultOutput = "<file_name>.srs"

var commandRuleSetCompile = &cobra.Command{
	Use: "compile [source-path]",
	Short: "Compile rule-set json to binary",
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		err := compileRuleSet(args[0])
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandRuleSet.AddCommand(commandRuleSetCompile)
	commandRuleSetCompile.Flags().StringVarP(&flagRuleSetCompileOutput, "output", "o", flagRuleSetCompileDefaultOutput, "Output file")
}

func compileRuleSet(sourcePath string) error {
	var (
		reader io.Reader
		err error
	)
	if sourcePath == "stdin" {
		reader = os.Stdin
	} else {
		reader, err = os.Open(sourcePath)
		if err != nil {
			return err
		}
	}
	decoder := json.NewDecoder(json.NewCommentFilter(reader))
	decoder.DisallowUnknownFields()
	var plainRuleSet option.PlainRuleSetCompat
	err = decoder.Decode(&plainRuleSet)
	if err != nil {
		return err
	}
	ruleSet := plainRuleSet.Upgrade()
	var outputPath string
	if flagRuleSetCompileOutput == flagRuleSetCompileDefaultOutput {
		if strings.HasSuffix(sourcePath, ".json") {
			outputPath = sourcePath[:len(sourcePath)-5] + ".srs"
		} else {
			outputPath = sourcePath + ".srs"
		}
	} else {
		outputPath = flagRuleSetCompileOutput
	}
	outputFile, err := os.Create(outputPath)
	if err != nil {
		return err
	}
	err = srs.Write(outputFile, ruleSet)
	if err != nil {
		outputFile.Close()
		os.Remove(outputPath)
		return err
	}
	outputFile.Close()
	return nil
}
```
cmd/sing-box/cmd_rule_set_format.go (new file, 87 lines)

```go
package main

import (
	"bytes"
	"io"
	"os"
	"path/filepath"

	"github.com/sagernet/sing-box/common/json"
	"github.com/sagernet/sing-box/log"
	"github.com/sagernet/sing-box/option"
	E "github.com/sagernet/sing/common/exceptions"

	"github.com/spf13/cobra"
)

var commandRuleSetFormatFlagWrite bool

var commandRuleSetFormat = &cobra.Command{
	Use: "format <source-path>",
	Short: "Format rule-set json",
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		err := formatRuleSet(args[0])
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandRuleSetFormat.Flags().BoolVarP(&commandRuleSetFormatFlagWrite, "write", "w", false, "write result to (source) file instead of stdout")
	commandRuleSet.AddCommand(commandRuleSetFormat)
}

func formatRuleSet(sourcePath string) error {
	var (
		reader io.Reader
		err error
	)
	if sourcePath == "stdin" {
		reader = os.Stdin
	} else {
		reader, err = os.Open(sourcePath)
		if err != nil {
			return err
		}
	}
	content, err := io.ReadAll(reader)
	if err != nil {
		return err
	}
	decoder := json.NewDecoder(json.NewCommentFilter(bytes.NewReader(content)))
	decoder.DisallowUnknownFields()
	var plainRuleSet option.PlainRuleSetCompat
	err = decoder.Decode(&plainRuleSet)
	if err != nil {
		return err
	}
	ruleSet := plainRuleSet.Upgrade()
	buffer := new(bytes.Buffer)
	encoder := json.NewEncoder(buffer)
	encoder.SetIndent("", " ")
	err = encoder.Encode(ruleSet)
	if err != nil {
		return E.Cause(err, "encode config")
	}
	outputPath, _ := filepath.Abs(sourcePath)
	if !commandRuleSetFormatFlagWrite || sourcePath == "stdin" {
		os.Stdout.WriteString(buffer.String() + "\n")
		return nil
	}
	if bytes.Equal(content, buffer.Bytes()) {
		return nil
	}
	output, err := os.Create(sourcePath)
	if err != nil {
		return E.Cause(err, "open output")
	}
	_, err = output.Write(buffer.Bytes())
	output.Close()
	if err != nil {
		return E.Cause(err, "write output")
	}
	os.Stderr.WriteString(outputPath + "\n")
	return nil
}
```
```diff
@@ -38,11 +38,7 @@ func createPreStartedClient() (*box.Box, error) {

 func createDialer(instance *box.Box, network string, outboundTag string) (N.Dialer, error) {
 	if outboundTag == "" {
-		outbound := instance.Router().DefaultOutbound(N.NetworkName(network))
-		if outbound == nil {
-			return nil, E.New("missing default outbound")
-		}
-		return outbound, nil
+		return instance.Router().DefaultOutbound(N.NetworkName(network))
 	} else {
 		outbound, loaded := instance.Router().Outbound(outboundTag)
 		if !loaded {
```
```diff
@@ -18,7 +18,7 @@ import (

 var commandConnectFlagNetwork string

 var commandConnect = &cobra.Command{
-	Use: "connect [address]",
+	Use: "connect <address>",
 	Short: "Connect to an address",
 	Args: cobra.ExactArgs(1),
 	Run: func(cmd *cobra.Command, args []string) {
```
```diff
@@ -1,10 +1,10 @@
 package badjsonmerge

 import (
 	"encoding/json"
 	"reflect"

 	"github.com/sagernet/sing-box/common/badjson"
 	"github.com/sagernet/sing-box/common/json"
 	"github.com/sagernet/sing-box/option"
 	E "github.com/sagernet/sing/common/exceptions"
 )
```
common/contextjson/README.md (new file, 3 lines)

```markdown
# contextjson

mod from go1.21.4
```
common/contextjson/decode.go (new file, 1325 lines): file diff suppressed because it is too large.
common/contextjson/decode_context.go (new file, 49 lines)

```go
package json

import "strconv"

type decodeContext struct {
	parent *decodeContext
	index int
	key string
}

func (d *decodeState) formatContext() string {
	var description string
	context := d.context
	var appendDot bool
	for context != nil {
		if appendDot {
			description = "." + description
		}
		if context.key != "" {
			description = context.key + description
			appendDot = true
		} else {
			description = "[" + strconv.Itoa(context.index) + "]" + description
			appendDot = false
		}
		context = context.parent
	}
	return description
}

type contextError struct {
	parent error
	context string
	index bool
}

func (c *contextError) Unwrap() error {
	return c.parent
}

func (c *contextError) Error() string {
	//goland:noinspection GoTypeAssertionOnErrors
	switch c.parent.(type) {
	case *contextError:
		return c.context + "." + c.parent.Error()
	default:
		return c.context + ": " + c.parent.Error()
	}
}
```
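An illustrative sketch (same package) of what formatContext produces: it walks the parent chain from the innermost decode position outward and builds a JSON-path-like string. The decodeContext chain below is hand-built; in the real decoder it would be maintained by decode.go (suppressed above), whose decodeState.context field is referenced by formatContext.

```go
package json

// exampleContextPath builds the chain the decoder would hold while reading
// the value at route.rules[2].outbound and returns the formatted path,
// which comes out as "route.rules[2].outbound".
func exampleContextPath() string {
	leaf := &decodeContext{
		key: "outbound",
		parent: &decodeContext{
			index: 2,
			parent: &decodeContext{
				key: "rules",
				parent: &decodeContext{key: "route"},
			},
		},
	}
	d := &decodeState{context: leaf} // decodeState lives in decode.go (not shown here)
	return d.formatContext()
}
```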
common/contextjson/encode.go (new file, 1283 lines): file diff suppressed because it is too large.
common/contextjson/fold.go (new file, 48 lines)

```go
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"unicode"
	"unicode/utf8"
)

// foldName returns a folded string such that foldName(x) == foldName(y)
// is identical to bytes.EqualFold(x, y).
func foldName(in []byte) []byte {
	// This is inlinable to take advantage of "function outlining".
	var arr [32]byte // large enough for most JSON names
	return appendFoldedName(arr[:0], in)
}

func appendFoldedName(out, in []byte) []byte {
	for i := 0; i < len(in); {
		// Handle single-byte ASCII.
		if c := in[i]; c < utf8.RuneSelf {
			if 'a' <= c && c <= 'z' {
				c -= 'a' - 'A'
			}
			out = append(out, c)
			i++
			continue
		}
		// Handle multi-byte Unicode.
		r, n := utf8.DecodeRune(in[i:])
		out = utf8.AppendRune(out, foldRune(r))
		i += n
	}
	return out
}

// foldRune is returns the smallest rune for all runes in the same fold set.
func foldRune(r rune) rune {
	for {
		r2 := unicode.SimpleFold(r)
		if r2 <= r {
			return r2
		}
		r = r2
	}
}
```
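A tiny illustrative helper (same package, not part of the diff) showing how the folded form is meant to be used: two field names match case-insensitively exactly when their foldName results are byte-equal.

```go
package json

import "bytes"

// namesMatch reports whether two JSON field names are equal under the
// case-folding the decoder uses, e.g. namesMatch([]byte("Tag"), []byte("tag")).
func namesMatch(a, b []byte) bool {
	return bytes.Equal(foldName(a), foldName(b))
}
```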
common/contextjson/indent.go (new file, 174 lines)

```go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import "bytes"

// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
// so that the JSON will be safe to embed inside HTML <script> tags.
// For historical reasons, web browsers don't honor standard HTML
// escaping within <script> tags, so an alternative JSON encoding must be used.
func HTMLEscape(dst *bytes.Buffer, src []byte) {
	dst.Grow(len(src))
	dst.Write(appendHTMLEscape(dst.AvailableBuffer(), src))
}

func appendHTMLEscape(dst, src []byte) []byte {
	// The characters can only appear in string literals,
	// so just scan the string one byte at a time.
	start := 0
	for i, c := range src {
		if c == '<' || c == '>' || c == '&' {
			dst = append(dst, src[start:i]...)
			dst = append(dst, '\\', 'u', '0', '0', hex[c>>4], hex[c&0xF])
			start = i + 1
		}
		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
			dst = append(dst, src[start:i]...)
			dst = append(dst, '\\', 'u', '2', '0', '2', hex[src[i+2]&0xF])
			start = i + len("\u2029")
		}
	}
	return append(dst, src[start:]...)
}

// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
	dst.Grow(len(src))
	b := dst.AvailableBuffer()
	b, err := appendCompact(b, src, false)
	dst.Write(b)
	return err
}

func appendCompact(dst, src []byte, escape bool) ([]byte, error) {
	origLen := len(dst)
	scan := newScanner()
	defer freeScanner(scan)
	start := 0
	for i, c := range src {
		if escape && (c == '<' || c == '>' || c == '&') {
			dst = append(dst, src[start:i]...)
			dst = append(dst, '\\', 'u', '0', '0', hex[c>>4], hex[c&0xF])
			start = i + 1
		}
		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
		if escape && c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
			dst = append(dst, src[start:i]...)
			dst = append(dst, '\\', 'u', '2', '0', '2', hex[src[i+2]&0xF])
			start = i + len("\u2029")
		}
		v := scan.step(scan, c)
		if v >= scanSkipSpace {
			if v == scanError {
				break
			}
			dst = append(dst, src[start:i]...)
			start = i + 1
		}
	}
	if scan.eof() == scanError {
		return dst[:origLen], scan.err
	}
	dst = append(dst, src[start:]...)
	return dst, nil
}

func appendNewline(dst []byte, prefix, indent string, depth int) []byte {
	dst = append(dst, '\n')
	dst = append(dst, prefix...)
	for i := 0; i < depth; i++ {
		dst = append(dst, indent...)
	}
	return dst
}

// indentGrowthFactor specifies the growth factor of indenting JSON input.
// Empirically, the growth factor was measured to be between 1.4x to 1.8x
// for some set of compacted JSON with the indent being a single tab.
// Specify a growth factor slightly larger than what is observed
// to reduce probability of allocation in appendIndent.
// A factor no higher than 2 ensures that wasted space never exceeds 50%.
const indentGrowthFactor = 2

// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
	dst.Grow(indentGrowthFactor * len(src))
	b := dst.AvailableBuffer()
	b, err := appendIndent(b, src, prefix, indent)
	dst.Write(b)
	return err
}

func appendIndent(dst, src []byte, prefix, indent string) ([]byte, error) {
	origLen := len(dst)
	scan := newScanner()
	defer freeScanner(scan)
	needIndent := false
	depth := 0
	for _, c := range src {
		scan.bytes++
		v := scan.step(scan, c)
		if v == scanSkipSpace {
			continue
		}
		if v == scanError {
			break
		}
		if needIndent && v != scanEndObject && v != scanEndArray {
			needIndent = false
			depth++
			dst = appendNewline(dst, prefix, indent, depth)
		}

		// Emit semantically uninteresting bytes
		// (in particular, punctuation in strings) unmodified.
		if v == scanContinue {
			dst = append(dst, c)
			continue
		}

		// Add spacing around real punctuation.
		switch c {
		case '{', '[':
			// delay indent so that empty object and array are formatted as {} and [].
			needIndent = true
			dst = append(dst, c)
		case ',':
			dst = append(dst, c)
			dst = appendNewline(dst, prefix, indent, depth)
		case ':':
			dst = append(dst, c, ' ')
		case '}', ']':
			if needIndent {
				// suppress indent in empty object/array
				needIndent = false
			} else {
				depth--
				dst = appendNewline(dst, prefix, indent, depth)
			}
			dst = append(dst, c)
		default:
			dst = append(dst, c)
		}
	}
	if scan.eof() == scanError {
		return dst[:origLen], scan.err
	}
	return dst, nil
}
```
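A short usage sketch of the exported helpers above (Compact and Indent have the signatures shown in this file); the wrapper function itself is illustrative.

```go
package json

import "bytes"

// reindent strips insignificant whitespace from src and then re-indents it
// with a two-space indent, using the Compact and Indent helpers defined above.
func reindent(src []byte) ([]byte, error) {
	var compacted bytes.Buffer
	if err := Compact(&compacted, src); err != nil {
		return nil, err
	}
	var indented bytes.Buffer
	if err := Indent(&indented, compacted.Bytes(), "", "  "); err != nil {
		return nil, err
	}
	return indented.Bytes(), nil
}
```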
common/contextjson/scanner.go (new file, 610 lines; the page truncates partway through this file)

```go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.

import (
	"strconv"
	"sync"
)

// Valid reports whether data is a valid JSON encoding.
func Valid(data []byte) bool {
	scan := newScanner()
	defer freeScanner(scan)
	return checkValid(data, scan) == nil
}

// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
// checkValid returns nil or a SyntaxError.
func checkValid(data []byte, scan *scanner) error {
	scan.reset()
	for _, c := range data {
		scan.bytes++
		if scan.step(scan, c) == scanError {
			return scan.err
		}
	}
	if scan.eof() == scanError {
		return scan.err
	}
	return nil
}

// A SyntaxError is a description of a JSON syntax error.
// Unmarshal will return a SyntaxError if the JSON can't be parsed.
type SyntaxError struct {
	msg string // description of error
	Offset int64 // error occurred after reading Offset bytes
}

func (e *SyntaxError) Error() string { return e.msg }

// A scanner is a JSON scanning state machine.
// Callers call scan.reset and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
	// The step is a func to be called to execute the next transition.
	// Also tried using an integer constant and a single func
	// with a switch, but using the func directly was 10% faster
	// on a 64-bit Mac Mini, and it's nicer to read.
	step func(*scanner, byte) int

	// Reached end of top-level value.
	endTop bool

	// Stack of what we're in the middle of - array values, object keys, object values.
	parseState []int

	// Error that happened, if any.
	err error

	// total bytes consumed, updated by decoder.Decode (and deliberately
	// not set to zero by scan.reset)
	bytes int64
}

var scannerPool = sync.Pool{
	New: func() any {
		return &scanner{}
	},
}

func newScanner() *scanner {
	scan := scannerPool.Get().(*scanner)
	// scan.reset by design doesn't set bytes to zero
	scan.bytes = 0
	scan.reset()
	return scan
}

func freeScanner(scan *scanner) {
	// Avoid hanging on to too much memory in extreme cases.
	if len(scan.parseState) > 1024 {
		scan.parseState = nil
	}
	scannerPool.Put(scan)
}

// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
	// Continue.
	scanContinue = iota // uninteresting byte
	scanBeginLiteral // end implied by next result != scanContinue
	scanBeginObject // begin object
	scanObjectKey // just finished object key (string)
	scanObjectValue // just finished non-last object value
	scanEndObject // end object (implies scanObjectValue if possible)
	scanBeginArray // begin array
	scanArrayValue // just finished array value
	scanEndArray // end array (implies scanArrayValue if possible)
	scanSkipSpace // space byte; can skip; known to be last "continue" result

	// Stop.
	scanEnd // top-level value ended *before* this byte; known to be first "stop" result
	scanError // hit an error, scanner.err.
)

// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
	parseObjectKey = iota // parsing object key (before colon)
	parseObjectValue // parsing object value (after colon)
	parseArrayValue // parsing array value
)

// This limits the max nesting depth to prevent stack overflow.
// This is permitted by https://tools.ietf.org/html/rfc7159#section-9
const maxNestingDepth = 10000

// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
	s.step = stateBeginValue
	s.parseState = s.parseState[0:0]
	s.err = nil
	s.endTop = false
}

// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
	if s.err != nil {
		return scanError
	}
	if s.endTop {
		return scanEnd
	}
	s.step(s, ' ')
	if s.endTop {
		return scanEnd
	}
	if s.err == nil {
		s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
	}
	return scanError
}

// pushParseState pushes a new parse state p onto the parse stack.
// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned.
func (s *scanner) pushParseState(c byte, newParseState int, successState int) int {
	s.parseState = append(s.parseState, newParseState)
	if len(s.parseState) <= maxNestingDepth {
		return successState
	}
	return s.error(c, "exceeded max depth")
}

// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
	n := len(s.parseState) - 1
	s.parseState = s.parseState[0:n]
	if n == 0 {
		s.step = stateEndTop
		s.endTop = true
	} else {
		s.step = stateEndValue
	}
}

func isSpace(c byte) bool {
	return c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n')
}

// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c byte) int {
	if isSpace(c) {
		return scanSkipSpace
	}
	if c == ']' {
		return stateEndValue(s, c)
	}
	return stateBeginValue(s, c)
}

// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c byte) int {
	if isSpace(c) {
		return scanSkipSpace
	}
	switch c {
	case '{':
		s.step = stateBeginStringOrEmpty
		return s.pushParseState(c, parseObjectKey, scanBeginObject)
	case '[':
		s.step = stateBeginValueOrEmpty
		return s.pushParseState(c, parseArrayValue, scanBeginArray)
	case '"':
		s.step = stateInString
		return scanBeginLiteral
	case '-':
		s.step = stateNeg
		return scanBeginLiteral
	case '0': // beginning of 0.123
		s.step = state0
		return scanBeginLiteral
	case 't': // beginning of true
		s.step = stateT
		return scanBeginLiteral
	case 'f': // beginning of false
		s.step = stateF
		return scanBeginLiteral
	case 'n': // beginning of null
		s.step = stateN
		return scanBeginLiteral
	}
	if '1' <= c && c <= '9' { // beginning of 1234.5
		s.step = state1
		return scanBeginLiteral
	}
	return s.error(c, "looking for beginning of value")
}

// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c byte) int {
	if isSpace(c) {
		return scanSkipSpace
	}
	if c == '}' {
		n := len(s.parseState)
		s.parseState[n-1] = parseObjectValue
		return stateEndValue(s, c)
	}
	return stateBeginString(s, c)
}

// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c byte) int {
	if isSpace(c) {
		return scanSkipSpace
	}
	if c == '"' {
		s.step = stateInString
		return scanBeginLiteral
	}
	return s.error(c, "looking for beginning of object key string")
}

// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) int {
	n := len(s.parseState)
	if n == 0 {
		// Completed top-level before the current byte.
		s.step = stateEndTop
		s.endTop = true
		return stateEndTop(s, c)
	}
	if isSpace(c) {
		s.step = stateEndValue
		return scanSkipSpace
	}
	ps := s.parseState[n-1]
	switch ps {
	case parseObjectKey:
		if c == ':' {
			s.parseState[n-1] = parseObjectValue
			s.step = stateBeginValue
			return scanObjectKey
		}
		return s.error(c, "after object key")
	case parseObjectValue:
		if c == ',' {
			s.parseState[n-1] = parseObjectKey
			s.step = stateBeginString
			return scanObjectValue
		}
		if c == '}' {
			s.popParseState()
			return scanEndObject
		}
		return s.error(c, "after object key:value pair")
	case parseArrayValue:
		if c == ',' {
			s.step = stateBeginValue
			return scanArrayValue
		}
		if c == ']' {
			s.popParseState()
			return scanEndArray
		}
		return s.error(c, "after array element")
	}
	return s.error(c, "")
}

// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) int {
	if !isSpace(c) {
		// Complain about non-space byte on next call.
		s.error(c, "after top-level value")
	}
	return scanEnd
}

// stateInString is the state after reading `"`.
func stateInString(s *scanner, c byte) int {
	if c == '"' {
		s.step = stateEndValue
		return scanContinue
	}
	if c == '\\' {
		s.step = stateInStringEsc
		return scanContinue
	}
	if c < 0x20 {
		return s.error(c, "in string literal")
	}
	return scanContinue
}

// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) int {
	switch c {
	case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
		s.step = stateInString
		return scanContinue
	case 'u':
		s.step = stateInStringEscU
		return scanContinue
	}
	return s.error(c, "in string escape code")
}

// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInStringEscU1
		return scanContinue
	}
	// numbers
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c byte) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInStringEscU12
		return scanContinue
	}
	// numbers
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c byte) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInStringEscU123
		return scanContinue
	}
	// numbers
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c byte) int {
```
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInString
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateNeg is the state after reading `-` during a number.
|
||||
func stateNeg(s *scanner, c byte) int {
|
||||
if c == '0' {
|
||||
s.step = state0
|
||||
return scanContinue
|
||||
}
|
||||
if '1' <= c && c <= '9' {
|
||||
s.step = state1
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in numeric literal")
|
||||
}
|
||||
|
||||
// state1 is the state after reading a non-zero integer during a number,
|
||||
// such as after reading `1` or `100` but not `0`.
|
||||
func state1(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = state1
|
||||
return scanContinue
|
||||
}
|
||||
return state0(s, c)
|
||||
}
|
||||
|
||||
// state0 is the state after reading `0` during a number.
|
||||
func state0(s *scanner, c byte) int {
|
||||
if c == '.' {
|
||||
s.step = stateDot
|
||||
return scanContinue
|
||||
}
|
||||
if c == 'e' || c == 'E' {
|
||||
s.step = stateE
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateDot is the state after reading the integer and decimal point in a number,
|
||||
// such as after reading `1.`.
|
||||
func stateDot(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = stateDot0
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "after decimal point in numeric literal")
|
||||
}
|
||||
|
||||
// stateDot0 is the state after reading the integer, decimal point, and subsequent
|
||||
// digits of a number, such as after reading `3.14`.
|
||||
func stateDot0(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
return scanContinue
|
||||
}
|
||||
if c == 'e' || c == 'E' {
|
||||
s.step = stateE
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateE is the state after reading the mantissa and e in a number,
|
||||
// such as after reading `314e` or `0.314e`.
|
||||
func stateE(s *scanner, c byte) int {
|
||||
if c == '+' || c == '-' {
|
||||
s.step = stateESign
|
||||
return scanContinue
|
||||
}
|
||||
return stateESign(s, c)
|
||||
}
|
||||
|
||||
// stateESign is the state after reading the mantissa, e, and sign in a number,
|
||||
// such as after reading `314e-` or `0.314e+`.
|
||||
func stateESign(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = stateE0
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in exponent of numeric literal")
|
||||
}
|
||||
|
||||
// stateE0 is the state after reading the mantissa, e, optional sign,
|
||||
// and at least one digit of the exponent in a number,
|
||||
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
|
||||
func stateE0(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateT is the state after reading `t`.
|
||||
func stateT(s *scanner, c byte) int {
|
||||
if c == 'r' {
|
||||
s.step = stateTr
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'r')")
|
||||
}
|
||||
|
||||
// stateTr is the state after reading `tr`.
|
||||
func stateTr(s *scanner, c byte) int {
|
||||
if c == 'u' {
|
||||
s.step = stateTru
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'u')")
|
||||
}
|
||||
|
||||
// stateTru is the state after reading `tru`.
|
||||
func stateTru(s *scanner, c byte) int {
|
||||
if c == 'e' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'e')")
|
||||
}
|
||||
|
||||
// stateF is the state after reading `f`.
|
||||
func stateF(s *scanner, c byte) int {
|
||||
if c == 'a' {
|
||||
s.step = stateFa
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'a')")
|
||||
}
|
||||
|
||||
// stateFa is the state after reading `fa`.
|
||||
func stateFa(s *scanner, c byte) int {
|
||||
if c == 'l' {
|
||||
s.step = stateFal
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateFal is the state after reading `fal`.
|
||||
func stateFal(s *scanner, c byte) int {
|
||||
if c == 's' {
|
||||
s.step = stateFals
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 's')")
|
||||
}
|
||||
|
||||
// stateFals is the state after reading `fals`.
|
||||
func stateFals(s *scanner, c byte) int {
|
||||
if c == 'e' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'e')")
|
||||
}
|
||||
|
||||
// stateN is the state after reading `n`.
|
||||
func stateN(s *scanner, c byte) int {
|
||||
if c == 'u' {
|
||||
s.step = stateNu
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'u')")
|
||||
}
|
||||
|
||||
// stateNu is the state after reading `nu`.
|
||||
func stateNu(s *scanner, c byte) int {
|
||||
if c == 'l' {
|
||||
s.step = stateNul
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateNul is the state after reading `nul`.
|
||||
func stateNul(s *scanner, c byte) int {
|
||||
if c == 'l' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateError is the state after reaching a syntax error,
|
||||
// such as after reading `[1}` or `5.1.2`.
|
||||
func stateError(s *scanner, c byte) int {
|
||||
return scanError
|
||||
}
|
||||
|
||||
// error records an error and switches to the error state.
|
||||
func (s *scanner) error(c byte, context string) int {
|
||||
s.step = stateError
|
||||
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
|
||||
return scanError
|
||||
}
|
||||
|
||||
// quoteChar formats c as a quoted character literal.
|
||||
func quoteChar(c byte) string {
|
||||
// special cases - different from quoted strings
|
||||
if c == '\'' {
|
||||
return `'\''`
|
||||
}
|
||||
if c == '"' {
|
||||
return `'"'`
|
||||
}
|
||||
|
||||
// use quoted string with different quotation marks
|
||||
s := strconv.Quote(string(c))
|
||||
return "'" + s[1:len(s)-1] + "'"
|
||||
}
|
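Before the next file, a short sketch of the messages the error/quoteChar pair above produces; since this scanner is a verbatim copy of the upstream encoding/json scanner, the standard library behaves identically:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var v any

	// '}' while an array element is open lands in stateEndValue's error branch.
	fmt.Println(json.Unmarshal([]byte("[1}"), &v))
	// invalid character '}' after array element

	// A single quote hits quoteChar's special case for '\''.
	fmt.Println(json.Unmarshal([]byte("'x'"), &v))
	// invalid character '\'' looking for beginning of value
}
```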
513
common/contextjson/stream.go
Normal file
@ -0,0 +1,513 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// A Decoder reads and decodes JSON values from an input stream.
|
||||
type Decoder struct {
|
||||
r io.Reader
|
||||
buf []byte
|
||||
d decodeState
|
||||
scanp int // start of unread data in buf
|
||||
scanned int64 // amount of data already scanned
|
||||
scan scanner
|
||||
err error
|
||||
|
||||
tokenState int
|
||||
tokenStack []int
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder that reads from r.
|
||||
//
|
||||
// The decoder introduces its own buffering and may
|
||||
// read data from r beyond the JSON values requested.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{r: r}
|
||||
}
|
||||
|
||||
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
|
||||
// Number instead of as a float64.
|
||||
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
|
||||
|
||||
// DisallowUnknownFields causes the Decoder to return an error when the destination
|
||||
// is a struct and the input contains object keys which do not match any
|
||||
// non-ignored, exported fields in the destination.
|
||||
func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true }
|
||||
|
||||
// Decode reads the next JSON-encoded value from its
|
||||
// input and stores it in the value pointed to by v.
|
||||
//
|
||||
// See the documentation for Unmarshal for details about
|
||||
// the conversion of JSON into a Go value.
|
||||
func (dec *Decoder) Decode(v any) error {
|
||||
if dec.err != nil {
|
||||
return dec.err
|
||||
}
|
||||
|
||||
if err := dec.tokenPrepareForDecode(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !dec.tokenValueAllowed() {
|
||||
return &SyntaxError{msg: "not at beginning of value", Offset: dec.InputOffset()}
|
||||
}
|
||||
|
||||
// Read whole value into buffer.
|
||||
n, err := dec.readValue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
|
||||
dec.scanp += n
|
||||
|
||||
// Don't save err from unmarshal into dec.err:
|
||||
// the connection is still usable since we read a complete JSON
|
||||
// object from it before the error happened.
|
||||
err = dec.d.unmarshal(v)
|
||||
|
||||
// fixup token streaming state
|
||||
dec.tokenValueEnd()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Buffered returns a reader of the data remaining in the Decoder's
|
||||
// buffer. The reader is valid until the next call to Decode.
|
||||
func (dec *Decoder) Buffered() io.Reader {
|
||||
return bytes.NewReader(dec.buf[dec.scanp:])
|
||||
}
|
||||
|
||||
// readValue reads a JSON value into dec.buf.
|
||||
// It returns the length of the encoding.
|
||||
func (dec *Decoder) readValue() (int, error) {
|
||||
dec.scan.reset()
|
||||
|
||||
scanp := dec.scanp
|
||||
var err error
|
||||
Input:
|
||||
// help the compiler see that scanp is never negative, so it can remove
|
||||
// some bounds checks below.
|
||||
for scanp >= 0 {
|
||||
|
||||
// Look in the buffer for a new value.
|
||||
for ; scanp < len(dec.buf); scanp++ {
|
||||
c := dec.buf[scanp]
|
||||
dec.scan.bytes++
|
||||
switch dec.scan.step(&dec.scan, c) {
|
||||
case scanEnd:
|
||||
// scanEnd is delayed one byte so we decrement
|
||||
// the scanner bytes count by 1 to ensure that
|
||||
// this value is correct in the next call of Decode.
|
||||
dec.scan.bytes--
|
||||
break Input
|
||||
case scanEndObject, scanEndArray:
|
||||
// scanEnd is delayed one byte.
|
||||
// We might block trying to get that byte from src,
|
||||
// so instead invent a space byte.
|
||||
if stateEndValue(&dec.scan, ' ') == scanEnd {
|
||||
scanp++
|
||||
break Input
|
||||
}
|
||||
case scanError:
|
||||
dec.err = dec.scan.err
|
||||
return 0, dec.scan.err
|
||||
}
|
||||
}
|
||||
|
||||
// Did the last read have an error?
|
||||
// Delayed until now to allow buffer scan.
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
if dec.scan.step(&dec.scan, ' ') == scanEnd {
|
||||
break Input
|
||||
}
|
||||
if nonSpace(dec.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}
|
||||
dec.err = err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
n := scanp - dec.scanp
|
||||
err = dec.refill()
|
||||
scanp = dec.scanp + n
|
||||
}
|
||||
return scanp - dec.scanp, nil
|
||||
}
|
||||
|
||||
func (dec *Decoder) refill() error {
|
||||
// Make room to read more into the buffer.
|
||||
// First slide down data already consumed.
|
||||
if dec.scanp > 0 {
|
||||
dec.scanned += int64(dec.scanp)
|
||||
n := copy(dec.buf, dec.buf[dec.scanp:])
|
||||
dec.buf = dec.buf[:n]
|
||||
dec.scanp = 0
|
||||
}
|
||||
|
||||
// Grow buffer if not large enough.
|
||||
const minRead = 512
|
||||
if cap(dec.buf)-len(dec.buf) < minRead {
|
||||
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
|
||||
copy(newBuf, dec.buf)
|
||||
dec.buf = newBuf
|
||||
}
|
||||
|
||||
// Read. Delay error for next iteration (after scan).
|
||||
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
|
||||
dec.buf = dec.buf[0 : len(dec.buf)+n]
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func nonSpace(b []byte) bool {
|
||||
for _, c := range b {
|
||||
if !isSpace(c) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// An Encoder writes JSON values to an output stream.
|
||||
type Encoder struct {
|
||||
w io.Writer
|
||||
err error
|
||||
escapeHTML bool
|
||||
|
||||
indentBuf []byte
|
||||
indentPrefix string
|
||||
indentValue string
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that writes to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{w: w, escapeHTML: true}
|
||||
}
|
||||
|
||||
// Encode writes the JSON encoding of v to the stream,
|
||||
// followed by a newline character.
|
||||
//
|
||||
// See the documentation for Marshal for details about the
|
||||
// conversion of Go values to JSON.
|
||||
func (enc *Encoder) Encode(v any) error {
|
||||
if enc.err != nil {
|
||||
return enc.err
|
||||
}
|
||||
|
||||
e := newEncodeState()
|
||||
defer encodeStatePool.Put(e)
|
||||
|
||||
err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Terminate each value with a newline.
|
||||
// This makes the output look a little nicer
|
||||
// when debugging, and some kind of space
|
||||
// is required if the encoded value was a number,
|
||||
// so that the reader knows there aren't more
|
||||
// digits coming.
|
||||
e.WriteByte('\n')
|
||||
|
||||
b := e.Bytes()
|
||||
if enc.indentPrefix != "" || enc.indentValue != "" {
|
||||
enc.indentBuf, err = appendIndent(enc.indentBuf[:0], b, enc.indentPrefix, enc.indentValue)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b = enc.indentBuf
|
||||
}
|
||||
if _, err = enc.w.Write(b); err != nil {
|
||||
enc.err = err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// SetIndent instructs the encoder to format each subsequent encoded
|
||||
// value as if indented by the package-level function Indent(dst, src, prefix, indent).
|
||||
// Calling SetIndent("", "") disables indentation.
|
||||
func (enc *Encoder) SetIndent(prefix, indent string) {
|
||||
enc.indentPrefix = prefix
|
||||
enc.indentValue = indent
|
||||
}
|
||||
|
||||
// SetEscapeHTML specifies whether problematic HTML characters
|
||||
// should be escaped inside JSON quoted strings.
|
||||
// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e
|
||||
// to avoid certain safety problems that can arise when embedding JSON in HTML.
|
||||
//
|
||||
// In non-HTML settings where the escaping interferes with the readability
|
||||
// of the output, SetEscapeHTML(false) disables this behavior.
|
||||
func (enc *Encoder) SetEscapeHTML(on bool) {
|
||||
enc.escapeHTML = on
|
||||
}
|
||||
|
||||
// RawMessage is a raw encoded JSON value.
|
||||
// It implements Marshaler and Unmarshaler and can
|
||||
// be used to delay JSON decoding or precompute a JSON encoding.
|
||||
type RawMessage []byte
|
||||
|
||||
// MarshalJSON returns m as the JSON encoding of m.
|
||||
func (m RawMessage) MarshalJSON() ([]byte, error) {
|
||||
if m == nil {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON sets *m to a copy of data.
|
||||
func (m *RawMessage) UnmarshalJSON(data []byte) error {
|
||||
if m == nil {
|
||||
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
|
||||
}
|
||||
*m = append((*m)[0:0], data...)
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
_ Marshaler = (*RawMessage)(nil)
|
||||
_ Unmarshaler = (*RawMessage)(nil)
|
||||
)
|
||||
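A brief sketch of the delayed-decoding pattern RawMessage is meant for, illustrated with the upstream encoding/json (whose RawMessage is identical); the Envelope type is hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Envelope defers decoding of Body until Type has been inspected.
type Envelope struct {
	Type string          `json:"type"`
	Body json.RawMessage `json:"body"`
}

func main() {
	var env Envelope
	_ = json.Unmarshal([]byte(`{"type":"point","body":{"x":1,"y":2}}`), &env)

	var point struct{ X, Y int }
	_ = json.Unmarshal(env.Body, &point)
	fmt.Println(env.Type, point.X, point.Y) // point 1 2
}
```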
|
||||
// A Token holds a value of one of these types:
|
||||
//
|
||||
// Delim, for the four JSON delimiters [ ] { }
|
||||
// bool, for JSON booleans
|
||||
// float64, for JSON numbers
|
||||
// Number, for JSON numbers
|
||||
// string, for JSON string literals
|
||||
// nil, for JSON null
|
||||
type Token any
|
||||
|
||||
const (
|
||||
tokenTopValue = iota
|
||||
tokenArrayStart
|
||||
tokenArrayValue
|
||||
tokenArrayComma
|
||||
tokenObjectStart
|
||||
tokenObjectKey
|
||||
tokenObjectColon
|
||||
tokenObjectValue
|
||||
tokenObjectComma
|
||||
)
|
||||
|
||||
// advance tokenstate from a separator state to a value state
|
||||
func (dec *Decoder) tokenPrepareForDecode() error {
|
||||
// Note: Not calling peek before switch, to avoid
|
||||
// putting peek into the standard Decode path.
|
||||
// peek is only called when using the Token API.
|
||||
switch dec.tokenState {
|
||||
case tokenArrayComma:
|
||||
c, err := dec.peek()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c != ',' {
|
||||
return &SyntaxError{"expected comma after array element", dec.InputOffset()}
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenArrayValue
|
||||
case tokenObjectColon:
|
||||
c, err := dec.peek()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c != ':' {
|
||||
return &SyntaxError{"expected colon after object key", dec.InputOffset()}
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenObjectValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dec *Decoder) tokenValueAllowed() bool {
|
||||
switch dec.tokenState {
|
||||
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (dec *Decoder) tokenValueEnd() {
|
||||
switch dec.tokenState {
|
||||
case tokenArrayStart, tokenArrayValue:
|
||||
dec.tokenState = tokenArrayComma
|
||||
case tokenObjectValue:
|
||||
dec.tokenState = tokenObjectComma
|
||||
}
|
||||
}
|
||||
|
||||
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
|
||||
type Delim rune
|
||||
|
||||
func (d Delim) String() string {
|
||||
return string(d)
|
||||
}
|
||||
|
||||
// Token returns the next JSON token in the input stream.
|
||||
// At the end of the input stream, Token returns nil, io.EOF.
|
||||
//
|
||||
// Token guarantees that the delimiters [ ] { } it returns are
|
||||
// properly nested and matched: if Token encounters an unexpected
|
||||
// delimiter in the input, it will return an error.
|
||||
//
|
||||
// The input stream consists of basic JSON values—bool, string,
|
||||
// number, and null—along with delimiters [ ] { } of type Delim
|
||||
// to mark the start and end of arrays and objects.
|
||||
// Commas and colons are elided.
|
||||
func (dec *Decoder) Token() (Token, error) {
|
||||
for {
|
||||
c, err := dec.peek()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch c {
|
||||
case '[':
|
||||
if !dec.tokenValueAllowed() {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
|
||||
dec.tokenState = tokenArrayStart
|
||||
return Delim('['), nil
|
||||
|
||||
case ']':
|
||||
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
|
||||
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
|
||||
dec.tokenValueEnd()
|
||||
return Delim(']'), nil
|
||||
|
||||
case '{':
|
||||
if !dec.tokenValueAllowed() {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
|
||||
dec.tokenState = tokenObjectStart
|
||||
return Delim('{'), nil
|
||||
|
||||
case '}':
|
||||
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
|
||||
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
|
||||
dec.tokenValueEnd()
|
||||
return Delim('}'), nil
|
||||
|
||||
case ':':
|
||||
if dec.tokenState != tokenObjectColon {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenObjectValue
|
||||
continue
|
||||
|
||||
case ',':
|
||||
if dec.tokenState == tokenArrayComma {
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenArrayValue
|
||||
continue
|
||||
}
|
||||
if dec.tokenState == tokenObjectComma {
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenObjectKey
|
||||
continue
|
||||
}
|
||||
return dec.tokenError(c)
|
||||
|
||||
case '"':
|
||||
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
|
||||
var x string
|
||||
old := dec.tokenState
|
||||
dec.tokenState = tokenTopValue
|
||||
err := dec.Decode(&x)
|
||||
dec.tokenState = old
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dec.tokenState = tokenObjectColon
|
||||
return x, nil
|
||||
}
|
||||
fallthrough
|
||||
|
||||
default:
|
||||
if !dec.tokenValueAllowed() {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
var x any
|
||||
if err := dec.Decode(&x); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (dec *Decoder) tokenError(c byte) (Token, error) {
|
||||
var context string
|
||||
switch dec.tokenState {
|
||||
case tokenTopValue:
|
||||
context = " looking for beginning of value"
|
||||
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
||||
context = " looking for beginning of value"
|
||||
case tokenArrayComma:
|
||||
context = " after array element"
|
||||
case tokenObjectKey:
|
||||
context = " looking for beginning of object key string"
|
||||
case tokenObjectColon:
|
||||
context = " after object key"
|
||||
case tokenObjectComma:
|
||||
context = " after object key:value pair"
|
||||
}
|
||||
return nil, &SyntaxError{"invalid character " + quoteChar(c) + context, dec.InputOffset()}
|
||||
}
|
||||
|
||||
// More reports whether there is another element in the
|
||||
// current array or object being parsed.
|
||||
func (dec *Decoder) More() bool {
|
||||
c, err := dec.peek()
|
||||
return err == nil && c != ']' && c != '}'
|
||||
}
|
||||
|
||||
func (dec *Decoder) peek() (byte, error) {
|
||||
var err error
|
||||
for {
|
||||
for i := dec.scanp; i < len(dec.buf); i++ {
|
||||
c := dec.buf[i]
|
||||
if isSpace(c) {
|
||||
continue
|
||||
}
|
||||
dec.scanp = i
|
||||
return c, nil
|
||||
}
|
||||
// buffer has been scanned, now report any error
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = dec.refill()
|
||||
}
|
||||
}
|
||||
|
||||
// InputOffset returns the input stream byte offset of the current decoder position.
|
||||
// The offset gives the location of the end of the most recently returned token
|
||||
// and the beginning of the next token.
|
||||
func (dec *Decoder) InputOffset() int64 {
|
||||
return dec.scanned + int64(dec.scanp)
|
||||
}
|
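To round off this file, a small usage sketch of the streaming Token API defined above (the upstream encoding/json exposes the same surface, so it is used here directly):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"tags":["a","b"],"port":443}`))
	for {
		tok, err := dec.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("%T %v\n", tok, tok)
	}
	// json.Delim {, string tags, json.Delim [, string a, string b,
	// json.Delim ], string port, float64 443, json.Delim }
}
```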
218
common/contextjson/tables.go
Normal file
@ -0,0 +1,218 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// safeSet holds the value true if the ASCII character with the given array
|
||||
// position can be represented inside a JSON string without any further
|
||||
// escaping.
|
||||
//
|
||||
// All values are true except for the ASCII control characters (0-31), the
|
||||
// double quote ("), and the backslash character ("\").
|
||||
var safeSet = [utf8.RuneSelf]bool{
|
||||
' ': true,
|
||||
'!': true,
|
||||
'"': false,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'(': true,
|
||||
')': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
',': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'/': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
':': true,
|
||||
';': true,
|
||||
'<': true,
|
||||
'=': true,
|
||||
'>': true,
|
||||
'?': true,
|
||||
'@': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'V': true,
|
||||
'W': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'[': true,
|
||||
'\\': false,
|
||||
']': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'{': true,
|
||||
'|': true,
|
||||
'}': true,
|
||||
'~': true,
|
||||
'\u007f': true,
|
||||
}
|
||||
|
||||
// htmlSafeSet holds the value true if the ASCII character with the given
|
||||
// array position can be safely represented inside a JSON string, embedded
|
||||
// inside of HTML <script> tags, without any additional escaping.
|
||||
//
|
||||
// All values are true except for the ASCII control characters (0-31), the
|
||||
// double quote ("), the backslash character ("\"), HTML opening and closing
|
||||
// tags ("<" and ">"), and the ampersand ("&").
|
||||
var htmlSafeSet = [utf8.RuneSelf]bool{
|
||||
' ': true,
|
||||
'!': true,
|
||||
'"': false,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': false,
|
||||
'\'': true,
|
||||
'(': true,
|
||||
')': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
',': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'/': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
':': true,
|
||||
';': true,
|
||||
'<': false,
|
||||
'=': true,
|
||||
'>': false,
|
||||
'?': true,
|
||||
'@': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'V': true,
|
||||
'W': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'[': true,
|
||||
'\\': false,
|
||||
']': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'{': true,
|
||||
'|': true,
|
||||
'}': true,
|
||||
'~': true,
|
||||
'\u007f': true,
|
||||
}
|
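The two tables differ only for `<`, `>` and `&`; a short sketch of how that difference surfaces through Encoder.SetEscapeHTML (shown with the upstream encoding/json, which these tables are copied from):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	payload := map[string]string{"html": "<b>&</b>"}

	var escaped, plain bytes.Buffer
	_ = json.NewEncoder(&escaped).Encode(payload) // uses htmlSafeSet

	enc := json.NewEncoder(&plain)
	enc.SetEscapeHTML(false) // falls back to safeSet
	_ = enc.Encode(payload)

	fmt.Print(escaped.String()) // {"html":"\u003cb\u003e\u0026\u003c/b\u003e"}
	fmt.Print(plain.String())   // {"html":"<b>&</b>"}
}
```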
38
common/contextjson/tags.go
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// tagOptions is the string following a comma in a struct field's "json"
|
||||
// tag, or the empty string. It does not include the leading comma.
|
||||
type tagOptions string
|
||||
|
||||
// parseTag splits a struct field's json tag into its name and
|
||||
// comma-separated options.
|
||||
func parseTag(tag string) (string, tagOptions) {
|
||||
tag, opt, _ := strings.Cut(tag, ",")
|
||||
return tag, tagOptions(opt)
|
||||
}
|
||||
|
||||
// Contains reports whether a comma-separated list of options
|
||||
// contains a particular substr flag. substr must be surrounded by a
|
||||
// string boundary or commas.
|
||||
func (o tagOptions) Contains(optionName string) bool {
|
||||
if len(o) == 0 {
|
||||
return false
|
||||
}
|
||||
s := string(o)
|
||||
for s != "" {
|
||||
var name string
|
||||
name, s, _ = strings.Cut(s, ",")
|
||||
if name == optionName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
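A tiny illustration of what parseTag and Contains operate on; the Options struct is hypothetical and only the standard library is used:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type Options struct {
	Listen string `json:"listen,omitempty"`
}

func main() {
	tag := reflect.TypeOf(Options{}).Field(0).Tag.Get("json") // "listen,omitempty"
	name, opts, _ := strings.Cut(tag, ",")                    // same split parseTag performs
	fmt.Println(name, opts)                                   // listen omitempty
}
```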
@ -18,11 +18,19 @@ func NewRouter(router adapter.Router) N.Dialer {
|
||||
}
|
||||
|
||||
func (d *RouterDialer) DialContext(ctx context.Context, network string, destination M.Socksaddr) (net.Conn, error) {
|
||||
return d.router.DefaultOutbound(network).DialContext(ctx, network, destination)
|
||||
dialer, err := d.router.DefaultOutbound(network)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dialer.DialContext(ctx, network, destination)
|
||||
}
|
||||
|
||||
func (d *RouterDialer) ListenPacket(ctx context.Context, destination M.Socksaddr) (net.PacketConn, error) {
|
||||
return d.router.DefaultOutbound(N.NetworkUDP).ListenPacket(ctx, destination)
|
||||
dialer, err := d.router.DefaultOutbound(N.NetworkUDP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dialer.ListenPacket(ctx, destination)
|
||||
}
|
||||
|
||||
func (d *RouterDialer) Upstream() any {
|
||||
|
21
common/json/context.go
Normal file
@ -0,0 +1,21 @@
|
||||
//go:build go1.21 && !without_contextjson
|
||||
|
||||
package json
|
||||
|
||||
import "github.com/sagernet/sing-box/common/contextjson"
|
||||
|
||||
var (
|
||||
Marshal = json.Marshal
|
||||
Unmarshal = json.Unmarshal
|
||||
NewEncoder = json.NewEncoder
|
||||
NewDecoder = json.NewDecoder
|
||||
)
|
||||
|
||||
type (
|
||||
Encoder = json.Encoder
|
||||
Decoder = json.Decoder
|
||||
Token = json.Token
|
||||
Delim = json.Delim
|
||||
SyntaxError = json.SyntaxError
|
||||
RawMessage = json.RawMessage
|
||||
)
|
@ -1,3 +1,5 @@
|
||||
//go:build !go1.21 || without_contextjson
|
||||
|
||||
package json
|
||||
|
||||
import "encoding/json"
|
||||
@ -15,4 +17,5 @@ type (
|
||||
Token = json.Token
|
||||
Delim = json.Delim
|
||||
SyntaxError = json.SyntaxError
|
||||
RawMessage = json.RawMessage
|
||||
)
|
||||
|
485
common/srs/binary.go
Normal file
@ -0,0 +1,485 @@
|
||||
package srs
|
||||
|
||||
import (
|
||||
"compress/zlib"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"net/netip"
|
||||
|
||||
C "github.com/sagernet/sing-box/constant"
|
||||
"github.com/sagernet/sing-box/option"
|
||||
"github.com/sagernet/sing/common"
|
||||
"github.com/sagernet/sing/common/domain"
|
||||
E "github.com/sagernet/sing/common/exceptions"
|
||||
"github.com/sagernet/sing/common/rw"
|
||||
|
||||
"go4.org/netipx"
|
||||
)
|
||||
|
||||
var MagicBytes = [3]byte{0x53, 0x52, 0x53} // SRS
|
||||
|
||||
const (
|
||||
ruleItemQueryType uint8 = iota
|
||||
ruleItemNetwork
|
||||
ruleItemDomain
|
||||
ruleItemDomainKeyword
|
||||
ruleItemDomainRegex
|
||||
ruleItemSourceIPCIDR
|
||||
ruleItemIPCIDR
|
||||
ruleItemSourcePort
|
||||
ruleItemSourcePortRange
|
||||
ruleItemPort
|
||||
ruleItemPortRange
|
||||
ruleItemProcessName
|
||||
ruleItemProcessPath
|
||||
ruleItemPackageName
|
||||
ruleItemWIFISSID
|
||||
ruleItemWIFIBSSID
|
||||
ruleItemFinal uint8 = 0xFF
|
||||
)
|
||||
|
||||
func Read(reader io.Reader, recovery bool) (ruleSet option.PlainRuleSet, err error) {
|
||||
var magicBytes [3]byte
|
||||
_, err = io.ReadFull(reader, magicBytes[:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if magicBytes != MagicBytes {
|
||||
err = E.New("invalid sing-box rule set file")
|
||||
return
|
||||
}
|
||||
var version uint8
|
||||
err = binary.Read(reader, binary.BigEndian, &version)
|
||||
if err != nil {
|
||||
return ruleSet, err
|
||||
}
|
||||
if version != 1 {
|
||||
return ruleSet, E.New("unsupported version: ", version)
|
||||
}
|
||||
zReader, err := zlib.NewReader(reader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
length, err := rw.ReadUVariant(zReader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
ruleSet.Rules = make([]option.HeadlessRule, length)
|
||||
for i := uint64(0); i < length; i++ {
|
||||
ruleSet.Rules[i], err = readRule(zReader, recovery)
|
||||
if err != nil {
|
||||
err = E.Cause(err, "read rule[", i, "]")
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func Write(writer io.Writer, ruleSet option.PlainRuleSet) error {
|
||||
_, err := writer.Write(MagicBytes[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = binary.Write(writer, binary.BigEndian, uint8(1))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
zWriter, err := zlib.NewWriterLevel(writer, zlib.BestCompression)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = rw.WriteUVariant(zWriter, uint64(len(ruleSet.Rules)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, rule := range ruleSet.Rules {
|
||||
err = writeRule(zWriter, rule)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return zWriter.Close()
|
||||
}
|
||||
|
||||
func readRule(reader io.Reader, recovery bool) (rule option.HeadlessRule, err error) {
|
||||
var ruleType uint8
|
||||
err = binary.Read(reader, binary.BigEndian, &ruleType)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
switch ruleType {
|
||||
case 0:
|
||||
rule.DefaultOptions, err = readDefaultRule(reader, recovery)
|
||||
case 1:
|
||||
rule.LogicalOptions, err = readLogicalRule(reader, recovery)
|
||||
default:
|
||||
err = E.New("unknown rule type: ", ruleType)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func writeRule(writer io.Writer, rule option.HeadlessRule) error {
|
||||
switch rule.Type {
|
||||
case C.RuleTypeDefault:
|
||||
return writeDefaultRule(writer, rule.DefaultOptions)
|
||||
case C.RuleTypeLogical:
|
||||
return writeLogicalRule(writer, rule.LogicalOptions)
|
||||
default:
|
||||
panic("unknown rule type: " + rule.Type)
|
||||
}
|
||||
}
|
||||
|
||||
func readDefaultRule(reader io.Reader, recovery bool) (rule option.DefaultHeadlessRule, err error) {
|
||||
var lastItemType uint8
|
||||
for {
|
||||
var itemType uint8
|
||||
err = binary.Read(reader, binary.BigEndian, &itemType)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
switch itemType {
|
||||
case ruleItemQueryType:
|
||||
var rawQueryType []uint16
|
||||
rawQueryType, err = readRuleItemUint16(reader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
rule.QueryType = common.Map(rawQueryType, func(it uint16) option.DNSQueryType {
|
||||
return option.DNSQueryType(it)
|
||||
})
|
||||
case ruleItemNetwork:
|
||||
rule.Network, err = readRuleItemString(reader)
|
||||
case ruleItemDomain:
|
||||
var matcher *domain.Matcher
|
||||
matcher, err = domain.ReadMatcher(reader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
rule.DomainMatcher = matcher
|
||||
case ruleItemDomainKeyword:
|
||||
rule.DomainKeyword, err = readRuleItemString(reader)
|
||||
case ruleItemDomainRegex:
|
||||
rule.DomainRegex, err = readRuleItemString(reader)
|
||||
case ruleItemSourceIPCIDR:
|
||||
rule.SourceIPSet, err = readIPSet(reader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if recovery {
|
||||
rule.SourceIPCIDR = common.Map(rule.SourceIPSet.Prefixes(), netip.Prefix.String)
|
||||
}
|
||||
case ruleItemIPCIDR:
|
||||
rule.IPSet, err = readIPSet(reader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if recovery {
|
||||
rule.IPCIDR = common.Map(rule.IPSet.Prefixes(), netip.Prefix.String)
|
||||
}
|
||||
case ruleItemSourcePort:
|
||||
rule.SourcePort, err = readRuleItemUint16(reader)
|
||||
case ruleItemSourcePortRange:
|
||||
rule.SourcePortRange, err = readRuleItemString(reader)
|
||||
case ruleItemPort:
|
||||
rule.Port, err = readRuleItemUint16(reader)
|
||||
case ruleItemPortRange:
|
||||
rule.PortRange, err = readRuleItemString(reader)
|
||||
case ruleItemProcessName:
|
||||
rule.ProcessName, err = readRuleItemString(reader)
|
||||
case ruleItemProcessPath:
|
||||
rule.ProcessPath, err = readRuleItemString(reader)
|
||||
case ruleItemPackageName:
|
||||
rule.PackageName, err = readRuleItemString(reader)
|
||||
case ruleItemWIFISSID:
|
||||
rule.WIFISSID, err = readRuleItemString(reader)
|
||||
case ruleItemWIFIBSSID:
|
||||
rule.WIFIBSSID, err = readRuleItemString(reader)
|
||||
case ruleItemFinal:
|
||||
err = binary.Read(reader, binary.BigEndian, &rule.Invert)
|
||||
return
|
||||
default:
|
||||
err = E.New("unknown rule item type: ", itemType, ", last type: ", lastItemType)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
lastItemType = itemType
|
||||
}
|
||||
}
|
||||
|
||||
func writeDefaultRule(writer io.Writer, rule option.DefaultHeadlessRule) error {
|
||||
err := binary.Write(writer, binary.BigEndian, uint8(0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(rule.QueryType) > 0 {
|
||||
err = writeRuleItemUint16(writer, ruleItemQueryType, common.Map(rule.QueryType, func(it option.DNSQueryType) uint16 {
|
||||
return uint16(it)
|
||||
}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.Network) > 0 {
|
||||
err = writeRuleItemString(writer, ruleItemNetwork, rule.Network)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.Domain) > 0 || len(rule.DomainSuffix) > 0 {
|
||||
err = binary.Write(writer, binary.BigEndian, ruleItemDomain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = domain.NewMatcher(rule.Domain, rule.DomainSuffix).Write(writer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.DomainKeyword) > 0 {
|
||||
err = writeRuleItemString(writer, ruleItemDomainKeyword, rule.DomainKeyword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.DomainRegex) > 0 {
|
||||
err = writeRuleItemString(writer, ruleItemDomainRegex, rule.DomainRegex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.SourceIPCIDR) > 0 {
|
||||
err = writeRuleItemCIDR(writer, ruleItemSourceIPCIDR, rule.SourceIPCIDR)
|
||||
if err != nil {
|
||||
return E.Cause(err, "source_ipcidr")
|
||||
}
|
||||
}
|
||||
if len(rule.IPCIDR) > 0 {
|
||||
err = writeRuleItemCIDR(writer, ruleItemIPCIDR, rule.IPCIDR)
|
||||
if err != nil {
|
||||
return E.Cause(err, "ipcidr")
|
||||
}
|
||||
}
|
||||
if len(rule.SourcePort) > 0 {
|
||||
err = writeRuleItemUint16(writer, ruleItemSourcePort, rule.SourcePort)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.SourcePortRange) > 0 {
|
||||
err = writeRuleItemString(writer, ruleItemSourcePortRange, rule.SourcePortRange)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.Port) > 0 {
|
||||
err = writeRuleItemUint16(writer, ruleItemPort, rule.Port)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.PortRange) > 0 {
|
||||
err = writeRuleItemString(writer, ruleItemPortRange, rule.PortRange)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.ProcessName) > 0 {
|
||||
err = writeRuleItemString(writer, ruleItemProcessName, rule.ProcessName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.ProcessPath) > 0 {
|
||||
err = writeRuleItemString(writer, ruleItemProcessPath, rule.ProcessPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.PackageName) > 0 {
|
||||
err = writeRuleItemString(writer, ruleItemPackageName, rule.PackageName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.WIFISSID) > 0 {
|
||||
err = writeRuleItemString(writer, ruleItemWIFISSID, rule.WIFISSID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(rule.WIFIBSSID) > 0 {
|
||||
err = writeRuleItemString(writer, ruleItemWIFIBSSID, rule.WIFIBSSID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = binary.Write(writer, binary.BigEndian, ruleItemFinal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = binary.Write(writer, binary.BigEndian, rule.Invert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readRuleItemString(reader io.Reader) ([]string, error) {
|
||||
length, err := rw.ReadUVariant(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
value := make([]string, length)
|
||||
for i := uint64(0); i < length; i++ {
|
||||
value[i], err = rw.ReadVString(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func writeRuleItemString(writer io.Writer, itemType uint8, value []string) error {
|
||||
err := binary.Write(writer, binary.BigEndian, itemType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = rw.WriteUVariant(writer, uint64(len(value)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range value {
|
||||
err = rw.WriteVString(writer, item)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readRuleItemUint16(reader io.Reader) ([]uint16, error) {
|
||||
length, err := rw.ReadUVariant(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
value := make([]uint16, length)
|
||||
for i := uint64(0); i < length; i++ {
|
||||
err = binary.Read(reader, binary.BigEndian, &value[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func writeRuleItemUint16(writer io.Writer, itemType uint8, value []uint16) error {
|
||||
err := binary.Write(writer, binary.BigEndian, itemType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = rw.WriteUVariant(writer, uint64(len(value)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range value {
|
||||
err = binary.Write(writer, binary.BigEndian, item)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeRuleItemCIDR(writer io.Writer, itemType uint8, value []string) error {
|
||||
var builder netipx.IPSetBuilder
|
||||
for i, prefixString := range value {
|
||||
prefix, err := netip.ParsePrefix(prefixString)
|
||||
if err == nil {
|
||||
builder.AddPrefix(prefix)
|
||||
continue
|
||||
}
|
||||
addr, addrErr := netip.ParseAddr(prefixString)
|
||||
if addrErr == nil {
|
||||
builder.Add(addr)
|
||||
continue
|
||||
}
|
||||
return E.Cause(err, "parse [", i, "]")
|
||||
}
|
||||
ipSet, err := builder.IPSet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = binary.Write(writer, binary.BigEndian, itemType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writeIPSet(writer, ipSet)
|
||||
}
|
||||
|
||||
func readLogicalRule(reader io.Reader, recovery bool) (logicalRule option.LogicalHeadlessRule, err error) {
|
||||
var mode uint8
|
||||
err = binary.Read(reader, binary.BigEndian, &mode)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
switch mode {
|
||||
case 0:
|
||||
logicalRule.Mode = C.LogicalTypeAnd
|
||||
case 1:
|
||||
logicalRule.Mode = C.LogicalTypeOr
|
||||
default:
|
||||
err = E.New("unknown logical mode: ", mode)
|
||||
return
|
||||
}
|
||||
length, err := rw.ReadUVariant(reader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
logicalRule.Rules = make([]option.HeadlessRule, length)
|
||||
for i := uint64(0); i < length; i++ {
|
||||
logicalRule.Rules[i], err = readRule(reader, recovery)
|
||||
if err != nil {
|
||||
err = E.Cause(err, "read logical rule [", i, "]")
|
||||
return
|
||||
}
|
||||
}
|
||||
err = binary.Read(reader, binary.BigEndian, &logicalRule.Invert)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func writeLogicalRule(writer io.Writer, logicalRule option.LogicalHeadlessRule) error {
|
||||
err := binary.Write(writer, binary.BigEndian, uint8(1))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch logicalRule.Mode {
|
||||
case C.LogicalTypeAnd:
|
||||
err = binary.Write(writer, binary.BigEndian, uint8(0))
|
||||
case C.LogicalTypeOr:
|
||||
err = binary.Write(writer, binary.BigEndian, uint8(1))
|
||||
default:
|
||||
panic("unknown logical mode: " + logicalRule.Mode)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = rw.WriteUVariant(writer, uint64(len(logicalRule.Rules)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, rule := range logicalRule.Rules {
|
||||
err = writeRule(writer, rule)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = binary.Write(writer, binary.BigEndian, logicalRule.Invert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
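A minimal sketch of consuming this format through the exported Read function above; the file name is hypothetical, and recovery is set to true so the textual *_cidr fields are rebuilt from the decoded IP sets:

```go
package main

import (
	"fmt"
	"os"

	"github.com/sagernet/sing-box/common/srs"
)

func main() {
	file, err := os.Open("rule-set.srs") // hypothetical compiled rule-set file
	if err != nil {
		panic(err)
	}
	defer file.Close()

	ruleSet, err := srs.Read(file, true) // recovery=true repopulates string CIDR fields
	if err != nil {
		panic(err)
	}
	fmt.Println("rules:", len(ruleSet.Rules))
}
```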
116
common/srs/ip_set.go
Normal file
@ -0,0 +1,116 @@
|
||||
package srs
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"net/netip"
|
||||
"unsafe"
|
||||
|
||||
"github.com/sagernet/sing/common/rw"
|
||||
|
||||
"go4.org/netipx"
|
||||
)
|
||||
|
||||
type myIPSet struct {
|
||||
rr []myIPRange
|
||||
}
|
||||
|
||||
type myIPRange struct {
|
||||
from netip.Addr
|
||||
to netip.Addr
|
||||
}
|
||||
|
||||
func readIPSet(reader io.Reader) (*netipx.IPSet, error) {
|
||||
var version uint8
|
||||
err := binary.Read(reader, binary.BigEndian, &version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var length uint64
|
||||
err = binary.Read(reader, binary.BigEndian, &length)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mySet := &myIPSet{
|
||||
rr: make([]myIPRange, length),
|
||||
}
|
||||
for i := uint64(0); i < length; i++ {
|
||||
var (
|
||||
fromLen uint64
|
||||
toLen uint64
|
||||
fromAddr netip.Addr
|
||||
toAddr netip.Addr
|
||||
)
|
||||
fromLen, err = rw.ReadUVariant(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fromBytes := make([]byte, fromLen)
|
||||
_, err = io.ReadFull(reader, fromBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = fromAddr.UnmarshalBinary(fromBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
toLen, err = rw.ReadUVariant(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
toBytes := make([]byte, toLen)
|
||||
_, err = io.ReadFull(reader, toBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = toAddr.UnmarshalBinary(toBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mySet.rr[i] = myIPRange{fromAddr, toAddr}
|
||||
}
|
||||
return (*netipx.IPSet)(unsafe.Pointer(mySet)), nil
|
||||
}
|
||||
|
||||
func writeIPSet(writer io.Writer, set *netipx.IPSet) error {
|
||||
err := binary.Write(writer, binary.BigEndian, uint8(1))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mySet := (*myIPSet)(unsafe.Pointer(set))
|
||||
err = binary.Write(writer, binary.BigEndian, uint64(len(mySet.rr)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, rr := range mySet.rr {
|
||||
var (
|
||||
fromBinary []byte
|
||||
toBinary []byte
|
||||
)
|
||||
fromBinary, err = rr.from.MarshalBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = rw.WriteUVariant(writer, uint64(len(fromBinary)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = writer.Write(fromBinary)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
toBinary, err = rr.to.MarshalBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = rw.WriteUVariant(writer, uint64(len(toBinary)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = writer.Write(toBinary)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
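For orientation, the from/to pairs serialized above are the same ranges netipx exposes publicly; a small sketch of building a set with the builder API that writeRuleItemCIDR uses:

```go
package main

import (
	"fmt"
	"net/netip"

	"go4.org/netipx"
)

func main() {
	var builder netipx.IPSetBuilder
	builder.AddPrefix(netip.MustParsePrefix("10.0.0.0/24"))
	builder.Add(netip.MustParseAddr("192.168.0.1"))

	set, err := builder.IPSet()
	if err != nil {
		panic(err)
	}
	for _, r := range set.Ranges() { // the same from/to pairs writeIPSet serializes
		fmt.Println(r.From(), "-", r.To())
	}
}
```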
@ -9,3 +9,11 @@ const (
|
||||
LogicalTypeAnd = "and"
|
||||
LogicalTypeOr = "or"
|
||||
)
|
||||
|
||||
const (
|
||||
RuleSetTypeLocal = "local"
|
||||
RuleSetTypeRemote = "remote"
|
||||
RuleSetVersion1 = 1
|
||||
RuleSetFormatSource = "source"
|
||||
RuleSetFormatBinary = "binary"
|
||||
)
|
||||
|
@ -4,10 +4,89 @@ icon: material/alert-decagram
|
||||
|
||||
# ChangeLog
|
||||
|
||||
#### 1.8.0-alpha.8
|
||||
|
||||
* Add context to JSON decode error message **1**
|
||||
* Reject internal fake-ip queries **2**
|
||||
* Fixes and improvements
|
||||
|
||||
**1**:
|
||||
|
||||
JSON parse errors will now include the current key path.
|
||||
Only takes effect when compiled with Go 1.21+.
|
||||
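A sketch of the intended effect; the option structs below are purely illustrative, and the exact error wording produced by the context-aware decoder is an assumption:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/sagernet/sing-box/common/json"
)

// Hypothetical option structs, for illustration only.
type LogOptions struct {
	Level string `json:"level"`
}

type Options struct {
	Log LogOptions `json:"log"`
}

func main() {
	data := []byte(`{"log": {"levle": "info"}}`) // misspelled key
	decoder := json.NewDecoder(bytes.NewReader(data))
	decoder.DisallowUnknownFields()

	var options Options
	// Plain encoding/json reports `json: unknown field "levle"`; when built with
	// Go 1.21+, the context-aware decoder is expected to also name the enclosing
	// key path (e.g. "log"). The exact message format is an assumption.
	fmt.Println(decoder.Decode(&options))
}
```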
|
||||
**2**:
|
||||
|
||||
All internal DNS queries now skip DNS rules with `server` type `fakeip`,
|
||||
and the default DNS server can no longer be `fakeip`.
|
||||
|
||||
This change is intended to break incorrect usage; correctly configured setups require no action.
|
||||
|
||||
#### 1.8.0-alpha.7
|
||||
|
||||
* Fixes and improvements
|
||||
|
||||
#### 1.7.1
|
||||
|
||||
* Fixes and improvements
|
||||
|
||||
#### 1.8.0-alpha.6
|
||||
|
||||
* Fix rule-set matching logic **1**
|
||||
* Fixes and improvements
|
||||
|
||||
**1**:
|
||||
|
||||
Rules inside the rule sets referenced by the `rule_set` item are now logically merged into the rule that uses them,
|
||||
rather than being combined with it under strict AND logic.
|
||||
|
||||
#### 1.8.0-alpha.5
|
||||
|
||||
* Parallel rule-set initialization
|
||||
* Independent `source_ip_is_private` and `ip_is_private` rules **1**
|
||||
|
||||
**1**:
|
||||
|
||||
The `private` GeoIP country never actually existed; it was implemented inside V2Ray.
|
||||
Since GeoIP is deprecated, this rule has been made independent; see [Migration](/migration/#migrate-geoip-to-rule-sets).
|
||||
|
||||
#### 1.8.0-alpha.1
|
||||
|
||||
* Migrate cache file from Clash API to independent options **1**
|
||||
* Introducing [Rule Set](/configuration/rule-set) **2**
|
||||
* Add `sing-box geoip`, `sing-box geosite` and `sing-box rule-set` commands **3**
|
||||
* Allow nested logical rules **4**
|
||||
|
||||
**1**:
|
||||
|
||||
See [Cache File](/configuration/experimental/cache-file) and
|
||||
[Migration](/migration/#migrate-cache-file-from-clash-api-to-independent-options).
|
||||
|
||||
**2**:
|
||||
|
||||
Rule sets are independent collections of rules that can be compiled into binaries to improve performance.
|
||||
Compared to the legacy GeoIP and Geosite resources,
|
||||
they can include more types of rules, load faster,
|
||||
use less memory, and update automatically.
|
||||
|
||||
See [Route#rule_set](/configuration/route/#rule_set),
|
||||
[Route Rule](/configuration/route/rule),
|
||||
[DNS Rule](/configuration/dns/rule),
|
||||
[Rule Set](/configuration/rule-set),
|
||||
[Source Format](/configuration/rule-set/source-format) and
|
||||
[Headless Rule](/configuration/rule-set/headless-rule).
|
||||
|
||||
For GEO resources migration, see [Migrate GeoIP to rule sets](/migration/#migrate-geoip-to-rule-sets) and
|
||||
[Migrate Geosite to rule sets](/migration/#migrate-geosite-to-rule-sets).
|
||||
|
||||
**3**:
|
||||
|
||||
New commands manage GeoIP, Geosite and rule set resources, and help you migrate GEO resources to rule sets.
|
||||
|
||||
**4**:
|
||||
|
||||
Logical rules in route rules, DNS rules, and the new headless rules can now be nested.
|
||||
|
||||
#### 1.7.0
|
||||
|
||||
* Fixes and improvements
|
||||
@ -146,11 +225,13 @@ Only supported in graphical clients on Android and iOS.
|
||||
|
||||
**1**:
|
||||
|
||||
Starting in 1.7.0, multiplexing support is no longer enabled by default and needs to be turned on explicitly in inbound options.
|
||||
Starting in 1.7.0, multiplexing support is no longer enabled by default and needs to be turned on explicitly in inbound
|
||||
options.
|
||||
|
||||
**2**
|
||||
|
||||
Hysteria Brutal Congestion Control Algorithm in TCP. A kernel module needs to be installed on the Linux server, see [TCP Brutal](/configuration/shared/tcp-brutal) for details.
|
||||
Hysteria Brutal Congestion Control Algorithm in TCP. A kernel module needs to be installed on the Linux server,
|
||||
see [TCP Brutal](/configuration/shared/tcp-brutal) for details.
|
||||
|
||||
#### 1.7.0-alpha.3
|
||||
|
||||
@ -217,8 +298,8 @@ When `auto_route` is enabled and `strict_route` is disabled, the device can now
|
||||
|
||||
**2**:
|
||||
|
||||
Built using Go 1.20, the last version that will run on Windows 7, 8, Server 2008, Server 2012 and macOS 10.13 High Sierra, 10.14 Mojave.
|
||||
|
||||
Built using Go 1.20, the last version that will run on Windows 7, 8, Server 2008, Server 2012 and macOS 10.13 High
|
||||
Sierra, 10.14 Mojave.
|
||||
|
||||
#### 1.6.0-rc.4
|
||||
|
||||
@ -231,7 +312,8 @@ Built using Go 1.20, the last version that will run on Windows 7, 8, Server 2008
|
||||
|
||||
**1**:
|
||||
|
||||
Built using Go 1.20, the last version that will run on Windows 7, 8, Server 2008, Server 2012 and macOS 10.13 High Sierra, 10.14 Mojave.
|
||||
Built using Go 1.20, the last version that will run on Windows 7, 8, Server 2008, Server 2012 and macOS 10.13 High
|
||||
Sierra, 10.14 Mojave.
|
||||
|
||||
#### 1.6.0-beta.4
|
||||
|
||||
|
@ -1,3 +1,14 @@
|
||||
---
|
||||
icon: material/alert-decagram
|
||||
---
|
||||
|
||||
!!! quote "Changes in sing-box 1.8.0"
|
||||
|
||||
:material-plus: [rule_set](#rule_set)
|
||||
:material-plus: [source_ip_is_private](#source_ip_is_private)
|
||||
:material-delete-clock: [geoip](#geoip)
|
||||
:material-delete-clock: [geosite](#geosite)
|
||||
|
||||
### Structure
|
||||
|
||||
```json
|
||||
@ -46,6 +57,7 @@
|
||||
"10.0.0.0/24",
|
||||
"192.168.0.1"
|
||||
],
|
||||
"source_ip_is_private": false,
|
||||
"source_port": [
|
||||
12345
|
||||
],
|
||||
@ -85,6 +97,10 @@
|
||||
"wifi_bssid": [
|
||||
"00:00:00:00:00:00"
|
||||
],
|
||||
"rule_set": [
|
||||
"geoip-cn",
|
||||
"geosite-cn"
|
||||
],
|
||||
"invert": false,
|
||||
"outbound": [
|
||||
"direct"
|
||||
@ -166,15 +182,29 @@ Match domain using regular expression.
|
||||
|
||||
#### geosite
|
||||
|
||||
!!! failure "Deprecated in sing-box 1.8.0"
|
||||
|
||||
Geosite is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geosite-to-rule-sets).
|
||||
|
||||
Match geosite.
|
||||
|
||||
#### source_geoip
|
||||
|
||||
!!! failure "Deprecated in sing-box 1.8.0"
|
||||
|
||||
GeoIP is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geoip-to-rule-sets).
|
||||
|
||||
Match source geoip.
|
||||
|
||||
#### source_ip_cidr
|
||||
|
||||
Match source ip cidr.
|
||||
Match source IP CIDR.
|
||||
|
||||
#### source_ip_is_private
|
||||
|
||||
!!! question "Since sing-box 1.8.0"
|
||||
|
||||
Match non-public source IP.
|
||||
|
||||
#### source_port
|
||||
|
||||
@ -250,6 +280,12 @@ Match WiFi SSID.
|
||||
|
||||
Match WiFi BSSID.
|
||||
|
||||
#### rule_set
|
||||
|
||||
!!! question "Since sing-box 1.8.0"
|
||||
|
||||
Match [Rule Set](/configuration/route/#rule_set).
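A minimal sketch of a DNS rule using this field (the rule set tags and the `local` server tag are placeholders matching the examples elsewhere in this document):

```json
{
  "rule_set": [
    "geoip-cn",
    "geosite-cn"
  ],
  "server": "local"
}
```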
|
||||
|
||||
#### invert
|
||||
|
||||
Invert match result.
|
||||
@ -286,4 +322,4 @@ Rewrite TTL in DNS responses.
|
||||
|
||||
#### rules
|
||||
|
||||
Included default rules.
|
||||
Included rules.
|
@ -1,3 +1,14 @@
|
||||
---
|
||||
icon: material/alert-decagram
|
||||
---
|
||||
|
||||
!!! quote "sing-box 1.8.0 中的更改"
|
||||
|
||||
:material-plus: [rule_set](#rule_set)
|
||||
:material-plus: [source_ip_is_private](#source_ip_is_private)
|
||||
:material-delete-clock: [geoip](#geoip)
|
||||
:material-delete-clock: [geosite](#geosite)
|
||||
|
||||
### 结构
|
||||
|
||||
```json
|
||||
@ -45,6 +56,7 @@
|
||||
"source_ip_cidr": [
|
||||
"10.0.0.0/24"
|
||||
],
|
||||
"source_ip_is_private": false,
|
||||
"source_port": [
|
||||
12345
|
||||
],
|
||||
@ -84,6 +96,10 @@
|
||||
"wifi_bssid": [
|
||||
"00:00:00:00:00:00"
|
||||
],
|
||||
"rule_set": [
|
||||
"geoip-cn",
|
||||
"geosite-cn"
|
||||
],
|
||||
"invert": false,
|
||||
"outbound": [
|
||||
"direct"
|
||||
@ -163,16 +179,30 @@ DNS 查询类型。值可以为整数或者类型名称字符串。
|
||||
|
||||
#### geosite
|
||||
|
||||
匹配 GeoSite。
|
||||
!!! failure "已在 sing-box 1.8.0 废弃"
|
||||
|
||||
Geosite 已废弃且可能在不久的将来移除,参阅 [迁移指南](/migration/#migrate-geosite-to-rule-sets)。
|
||||
|
||||
匹配 Geosite。
|
||||
|
||||
#### source_geoip
|
||||
|
||||
!!! failure "已在 sing-box 1.8.0 废弃"
|
||||
|
||||
GeoIP 已废弃且可能在不久的将来移除,参阅 [迁移指南](/migration/#migrate-geoip-to-rule-sets)。
|
||||
|
||||
匹配源 GeoIP。
|
||||
|
||||
#### source_ip_cidr
|
||||
|
||||
匹配源 IP CIDR。
|
||||
|
||||
#### source_ip_is_private
|
||||
|
||||
!!! question "自 sing-box 1.8.0 起"
|
||||
|
||||
匹配非公开源 IP。
|
||||
|
||||
#### source_port
|
||||
|
||||
匹配源端口。
|
||||
@ -245,6 +275,12 @@ DNS 查询类型。值可以为整数或者类型名称字符串。
|
||||
|
||||
匹配 WiFi BSSID。
|
||||
|
||||
#### rule_set
|
||||
|
||||
!!! question "自 sing-box 1.8.0 起"
|
||||
|
||||
匹配[规则集](/zh/configuration/route/#rule_set)。
|
||||
|
||||
#### invert
|
||||
|
||||
反选匹配结果。
|
||||
@ -281,4 +317,4 @@ DNS 查询类型。值可以为整数或者类型名称字符串。
|
||||
|
||||
#### rules
|
||||
|
||||
包括的默认规则。
|
||||
包括的规则。
|
34
docs/configuration/experimental/cache-file.md
Normal file
@ -0,0 +1,34 @@
|
||||
---
|
||||
icon: material/new-box
|
||||
---
|
||||
|
||||
!!! question "Since sing-box 1.8.0"
|
||||
|
||||
### Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"enabled": true,
|
||||
"path": "",
|
||||
"cache_id": "",
|
||||
"store_fakeip": false
|
||||
}
|
||||
```
|
||||
|
||||
### Fields
|
||||
|
||||
#### enabled
|
||||
|
||||
Enable cache file.
|
||||
|
||||
#### path
|
||||
|
||||
Path to the cache file.
|
||||
|
||||
`cache.db` will be used if empty.
|
||||
|
||||
#### cache_id
|
||||
|
||||
Identifier in cache file.
|
||||
|
||||
If not empty, data specific to this configuration will be stored in a separate store keyed by this identifier.
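For example, two configurations can share one cache file while keeping their data separate by using different identifiers; a sketch (the path and ID values are placeholders):

```json
{
  "experimental": {
    "cache_file": {
      "enabled": true,
      "path": "cache.db",
      "cache_id": "profile_a"
    }
  }
}
```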
|
121
docs/configuration/experimental/clash-api.md
Normal file
@ -0,0 +1,121 @@
|
||||
---
|
||||
icon: material/alert-decagram
|
||||
---
|
||||
|
||||
!!! quote "Changes in sing-box 1.8.0"
|
||||
|
||||
:material-delete-alert: [store_mode](#store_mode)
|
||||
:material-delete-alert: [store_selected](#store_selected)
|
||||
:material-delete-alert: [store_fakeip](#store_fakeip)
|
||||
:material-delete-alert: [cache_file](#cache_file)
|
||||
:material-delete-alert: [cache_id](#cache_id)
|
||||
|
||||
|
||||
!!! quote ""
|
||||
|
||||
Clash API is not included by default, see [Installation](./#installation).
|
||||
|
||||
### Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"external_controller": "127.0.0.1:9090",
|
||||
"external_ui": "",
|
||||
"external_ui_download_url": "",
|
||||
"external_ui_download_detour": "",
|
||||
"secret": "",
|
||||
"default_mode": "",
|
||||
|
||||
// Deprecated
|
||||
|
||||
"store_mode": false,
|
||||
"store_selected": false,
|
||||
"store_fakeip": false,
|
||||
"cache_file": "",
|
||||
"cache_id": ""
|
||||
}
|
||||
```
|
||||
|
||||
### Fields
|
||||
|
||||
#### external_controller
|
||||
|
||||
RESTful web API listening address. Clash API will be disabled if empty.
|
||||
|
||||
#### external_ui
|
||||
|
||||
A relative path to the configuration directory or an absolute path to a
|
||||
directory containing static web resources. sing-box will then
|
||||
serve them at `http://{{external-controller}}/ui`.
|
||||
|
||||
|
||||
|
||||
#### external_ui_download_url
|
||||
|
||||
ZIP download URL for the external UI, will be used if the specified `external_ui` directory is empty.
|
||||
|
||||
`https://github.com/MetaCubeX/Yacd-meta/archive/gh-pages.zip` will be used if empty.
|
||||
|
||||
#### external_ui_download_detour
|
||||
|
||||
The tag of the outbound to download the external UI.
|
||||
|
||||
Default outbound will be used if empty.
|
||||
|
||||
#### secret
|
||||
|
||||
Secret for the RESTful API (optional).
|
||||
Authenticate by specifying the HTTP header `Authorization: Bearer ${secret}`.
|
||||
ALWAYS set a secret if the RESTful API is listening on 0.0.0.0.
|
||||
|
||||
#### default_mode
|
||||
|
||||
Default mode in Clash, `Rule` will be used if empty.
|
||||
|
||||
This setting has no direct effect, but can be used in routing and DNS rules via the `clash_mode` rule item.
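For illustration, a sketch of route rules keyed on the Clash mode (the outbound tags are placeholders):

```json
{
  "route": {
    "rules": [
      {
        "clash_mode": "Direct",
        "outbound": "direct"
      },
      {
        "clash_mode": "Global",
        "outbound": "default"
      }
    ]
  }
}
```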
|
||||
|
||||
#### store_mode
|
||||
|
||||
!!! failure "Deprecated in sing-box 1.8.0"
|
||||
|
||||
`store_mode` is deprecated in Clash API and is enabled by default if `cache_file.enabled` is set.
|
||||
|
||||
Store Clash mode in cache file.
|
||||
|
||||
#### store_selected
|
||||
|
||||
!!! failure "Deprecated in sing-box 1.8.0"
|
||||
|
||||
`store_selected` is deprecated in Clash API and is enabled by default if `cache_file.enabled` is set.
|
||||
|
||||
!!! note ""
|
||||
|
||||
The tag must be set for target outbounds.
|
||||
|
||||
Store selected outbound for the `Selector` outbound in cache file.
|
||||
|
||||
#### store_fakeip
|
||||
|
||||
!!! failure "Deprecated in sing-box 1.8.0"
|
||||
|
||||
`store_fakeip` is deprecated in Clash API and migrated to `cache_file.store_fakeip`.
|
||||
|
||||
Store fakeip in cache file.
|
||||
|
||||
#### cache_file
|
||||
|
||||
!!! failure "Deprecated in sing-box 1.8.0"
|
||||
|
||||
`cache_file` is deprecated in Clash API and migrated to `cache_file.enabled` and `cache_file.path`.
|
||||
|
||||
Cache file path, `cache.db` will be used if empty.
|
||||
|
||||
#### cache_id
|
||||
|
||||
!!! failure "Deprecated in sing-box 1.8.0"
|
||||
|
||||
`cache_id` is deprecated in Clash API and migrated to `cache_file.cache_id`.
|
||||
|
||||
Identifier in cache file.
|
||||
|
||||
If not empty, data specific to this configuration will be stored in a separate store keyed by this identifier.
|
@ -1,139 +1,30 @@
|
||||
---
|
||||
icon: material/alert-decagram
|
||||
---
|
||||
|
||||
# Experimental
|
||||
|
||||
!!! quote "Changes in sing-box 1.8.0"
|
||||
|
||||
:material-plus: [cache_file](#cache_file)
|
||||
:material-alert-decagram: [clash_api](#clash_api)
|
||||
|
||||
### Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"experimental": {
|
||||
"clash_api": {
|
||||
"external_controller": "127.0.0.1:9090",
|
||||
"external_ui": "",
|
||||
"external_ui_download_url": "",
|
||||
"external_ui_download_detour": "",
|
||||
"secret": "",
|
||||
"default_mode": "",
|
||||
"store_mode": false,
|
||||
"store_selected": false,
|
||||
"store_fakeip": false,
|
||||
"cache_file": "",
|
||||
"cache_id": ""
|
||||
},
|
||||
"v2ray_api": {
|
||||
"listen": "127.0.0.1:8080",
|
||||
"stats": {
|
||||
"enabled": true,
|
||||
"inbounds": [
|
||||
"socks-in"
|
||||
],
|
||||
"outbounds": [
|
||||
"proxy",
|
||||
"direct"
|
||||
],
|
||||
"users": [
|
||||
"sekai"
|
||||
]
|
||||
}
|
||||
}
|
||||
"cache_file": {},
|
||||
"clash_api": {},
|
||||
"v2ray_api": {}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
!!! note ""
|
||||
### Fields
|
||||
|
||||
Traffic statistics and connection management can degrade performance.
|
||||
|
||||
### Clash API Fields
|
||||
|
||||
!!! quote ""
|
||||
|
||||
Clash API is not included by default, see [Installation](./#installation).
|
||||
|
||||
#### external_controller
|
||||
|
||||
RESTful web API listening address. Clash API will be disabled if empty.
|
||||
|
||||
#### external_ui
|
||||
|
||||
A relative path to the configuration directory or an absolute path to a
|
||||
directory in which you put some static web resource. sing-box will then
|
||||
serve it at `http://{{external-controller}}/ui`.
|
||||
|
||||
#### external_ui_download_url
|
||||
|
||||
ZIP download URL for the external UI, will be used if the specified `external_ui` directory is empty.
|
||||
|
||||
`https://github.com/MetaCubeX/Yacd-meta/archive/gh-pages.zip` will be used if empty.
|
||||
|
||||
#### external_ui_download_detour
|
||||
|
||||
The tag of the outbound to download the external UI.
|
||||
|
||||
Default outbound will be used if empty.
|
||||
|
||||
#### secret
|
||||
|
||||
Secret for the RESTful API (optional).
|
||||
Authenticate by specifying the HTTP header `Authorization: Bearer ${secret}`.
|
||||
ALWAYS set a secret if the RESTful API is listening on 0.0.0.0.
|
||||
|
||||
#### default_mode
|
||||
|
||||
Default mode in clash, `Rule` will be used if empty.
|
||||
|
||||
This setting has no direct effect, but can be used in routing and DNS rules via the `clash_mode` rule item.
|
||||
|
||||
#### store_mode
|
||||
|
||||
Store Clash mode in cache file.
|
||||
|
||||
#### store_selected
|
||||
|
||||
!!! note ""
|
||||
|
||||
The tag must be set for target outbounds.
|
||||
|
||||
Store selected outbound for the `Selector` outbound in cache file.
|
||||
|
||||
#### store_fakeip
|
||||
|
||||
Store fakeip in cache file.
|
||||
|
||||
#### cache_file
|
||||
|
||||
Cache file path, `cache.db` will be used if empty.
|
||||
|
||||
#### cache_id
|
||||
|
||||
Cache ID.
|
||||
|
||||
If not empty, `store_selected` will use a separate store keyed by it.
|
||||
|
||||
### V2Ray API Fields
|
||||
|
||||
!!! quote ""
|
||||
|
||||
V2Ray API is not included by default, see [Installation](./#installation).
|
||||
|
||||
#### listen
|
||||
|
||||
gRPC API listening address. V2Ray API will be disabled if empty.
|
||||
|
||||
#### stats
|
||||
|
||||
Traffic statistics service settings.
|
||||
|
||||
#### stats.enabled
|
||||
|
||||
Enable statistics service.
|
||||
|
||||
#### stats.inbounds
|
||||
|
||||
Inbound list to count traffic.
|
||||
|
||||
#### stats.outbounds
|
||||
|
||||
Outbound list to count traffic.
|
||||
|
||||
#### stats.users
|
||||
|
||||
User list to count traffic.
|
||||
| Key | Format |
|
||||
|--------------|----------------------------|
|
||||
| `cache_file` | [Cache File](./cache-file) |
|
||||
| `clash_api` | [Clash API](./clash-api) |
|
||||
| `v2ray_api` | [V2Ray API](./v2ray-api) |
|
@ -1,137 +0,0 @@
|
||||
# 实验性
|
||||
|
||||
### 结构
|
||||
|
||||
```json
|
||||
{
|
||||
"experimental": {
|
||||
"clash_api": {
|
||||
"external_controller": "127.0.0.1:9090",
|
||||
"external_ui": "",
|
||||
"external_ui_download_url": "",
|
||||
"external_ui_download_detour": "",
|
||||
"secret": "",
|
||||
"default_mode": "",
|
||||
"store_mode": false,
|
||||
"store_selected": false,
|
||||
"store_fakeip": false,
|
||||
"cache_file": "",
|
||||
"cache_id": ""
|
||||
},
|
||||
"v2ray_api": {
|
||||
"listen": "127.0.0.1:8080",
|
||||
"stats": {
|
||||
"enabled": true,
|
||||
"inbounds": [
|
||||
"socks-in"
|
||||
],
|
||||
"outbounds": [
|
||||
"proxy",
|
||||
"direct"
|
||||
],
|
||||
"users": [
|
||||
"sekai"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
!!! note ""
|
||||
|
||||
流量统计和连接管理会降低性能。
|
||||
|
||||
### Clash API 字段
|
||||
|
||||
!!! quote ""
|
||||
|
||||
默认安装不包含 Clash API,参阅 [安装](/zh/#_2)。
|
||||
|
||||
#### external_controller
|
||||
|
||||
RESTful web API 监听地址。如果为空,则禁用 Clash API。
|
||||
|
||||
#### external_ui
|
||||
|
||||
到静态网页资源目录的相对路径或绝对路径。sing-box 会在 `http://{{external-controller}}/ui` 下提供它。
|
||||
|
||||
#### external_ui_download_url
|
||||
|
||||
静态网页资源的 ZIP 下载 URL,如果指定的 `external_ui` 目录为空,将使用。
|
||||
|
||||
默认使用 `https://github.com/MetaCubeX/Yacd-meta/archive/gh-pages.zip`。
|
||||
|
||||
#### external_ui_download_detour
|
||||
|
||||
用于下载静态网页资源的出站的标签。
|
||||
|
||||
如果为空,将使用默认出站。
|
||||
|
||||
#### secret
|
||||
|
||||
RESTful API 的密钥(可选)
|
||||
通过指定 HTTP 标头 `Authorization: Bearer ${secret}` 进行身份验证
|
||||
如果 RESTful API 正在监听 0.0.0.0,请始终设置一个密钥。
|
||||
|
||||
#### default_mode
|
||||
|
||||
Clash 中的默认模式,默认使用 `Rule`。
|
||||
|
||||
此设置没有直接影响,但可以通过 `clash_mode` 规则项在路由和 DNS 规则中使用。
|
||||
|
||||
#### store_mode
|
||||
|
||||
将 Clash 模式存储在缓存文件中。
|
||||
|
||||
#### store_selected
|
||||
|
||||
!!! note ""
|
||||
|
||||
必须为目标出站设置标签。
|
||||
|
||||
将 `Selector` 中出站的选定的目标出站存储在缓存文件中。
|
||||
|
||||
#### store_fakeip
|
||||
|
||||
将 fakeip 存储在缓存文件中。
|
||||
|
||||
#### cache_file
|
||||
|
||||
缓存文件路径,默认使用`cache.db`。
|
||||
|
||||
#### cache_id
|
||||
|
||||
缓存 ID。
|
||||
|
||||
如果不为空,`store_selected` 将会使用以此为键的独立存储。
|
||||
|
||||
### V2Ray API 字段
|
||||
|
||||
!!! quote ""
|
||||
|
||||
默认安装不包含 V2Ray API,参阅 [安装](/zh/#_2)。
|
||||
|
||||
#### listen
|
||||
|
||||
gRPC API 监听地址。如果为空,则禁用 V2Ray API。
|
||||
|
||||
#### stats
|
||||
|
||||
流量统计服务设置。
|
||||
|
||||
#### stats.enabled
|
||||
|
||||
启用统计服务。
|
||||
|
||||
#### stats.inbounds
|
||||
|
||||
统计流量的入站列表。
|
||||
|
||||
#### stats.outbounds
|
||||
|
||||
统计流量的出站列表。
|
||||
|
||||
#### stats.users
|
||||
|
||||
统计流量的用户列表。
|
50
docs/configuration/experimental/v2ray-api.md
Normal file
@ -0,0 +1,50 @@
|
||||
### Structure
|
||||
|
||||
!!! quote ""
|
||||
|
||||
V2Ray API is not included by default, see [Installation](./#installation).
|
||||
|
||||
```json
|
||||
{
|
||||
"listen": "127.0.0.1:8080",
|
||||
"stats": {
|
||||
"enabled": true,
|
||||
"inbounds": [
|
||||
"socks-in"
|
||||
],
|
||||
"outbounds": [
|
||||
"proxy",
|
||||
"direct"
|
||||
],
|
||||
"users": [
|
||||
"sekai"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Fields
|
||||
|
||||
#### listen
|
||||
|
||||
gRPC API listening address. V2Ray API will be disabled if empty.
|
||||
|
||||
#### stats
|
||||
|
||||
Traffic statistics service settings.
|
||||
|
||||
#### stats.enabled
|
||||
|
||||
Enable statistics service.
|
||||
|
||||
#### stats.inbounds
|
||||
|
||||
Inbound list to count traffic.
|
||||
|
||||
#### stats.outbounds
|
||||
|
||||
Outbound list to count traffic.
|
||||
|
||||
#### stats.users
|
||||
|
||||
User list to count traffic.
|
@ -1,3 +1,11 @@
|
||||
---
|
||||
icon: material/delete-clock
|
||||
---
|
||||
|
||||
!!! failure "Deprecated in sing-box 1.8.0"
|
||||
|
||||
GeoIP is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geoip-to-rule-sets).
|
||||
|
||||
### Structure
|
||||
|
||||
```json
|
||||
|
@ -1,33 +0,0 @@
|
||||
### 结构
|
||||
|
||||
```json
|
||||
{
|
||||
"route": {
|
||||
"geoip": {
|
||||
"path": "",
|
||||
"download_url": "",
|
||||
"download_detour": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 字段
|
||||
|
||||
#### path
|
||||
|
||||
指定 GeoIP 资源的路径。
|
||||
|
||||
默认 `geoip.db`。
|
||||
|
||||
#### download_url
|
||||
|
||||
指定 GeoIP 资源的下载链接。
|
||||
|
||||
默认为 `https://github.com/SagerNet/sing-geoip/releases/latest/download/geoip.db`。
|
||||
|
||||
#### download_detour
|
||||
|
||||
用于下载 GeoIP 资源的出站的标签。
|
||||
|
||||
如果为空,将使用默认出站。
|
@ -1,3 +1,11 @@
|
||||
---
|
||||
icon: material/delete-clock
|
||||
---
|
||||
|
||||
!!! failure "Deprecated in sing-box 1.8.0"
|
||||
|
||||
Geosite is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geosite-to-rule-sets).
|
||||
|
||||
### Structure
|
||||
|
||||
```json
|
||||
|
@ -1,33 +0,0 @@
|
||||
### 结构
|
||||
|
||||
```json
|
||||
{
|
||||
"route": {
|
||||
"geosite": {
|
||||
"path": "",
|
||||
"download_url": "",
|
||||
"download_detour": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 字段
|
||||
|
||||
#### path
|
||||
|
||||
指定 GeoSite 资源的路径。
|
||||
|
||||
默认 `geosite.db`。
|
||||
|
||||
#### download_url
|
||||
|
||||
指定 GeoSite 资源的下载链接。
|
||||
|
||||
默认为 `https://github.com/SagerNet/sing-geosite/releases/latest/download/geosite.db`。
|
||||
|
||||
#### download_detour
|
||||
|
||||
用于下载 GeoSite 资源的出站的标签。
|
||||
|
||||
如果为空,将使用默认出站。
|
@ -1,5 +1,15 @@
|
||||
---
|
||||
icon: material/alert-decagram
|
||||
---
|
||||
|
||||
# Route
|
||||
|
||||
!!! quote "Changes in sing-box 1.8.0"
|
||||
|
||||
:material-plus: [rule_set](#rule_set)
|
||||
:material-delete-clock: [geoip](#geoip)
|
||||
:material-delete-clock: [geosite](#geosite)
|
||||
|
||||
### Structure
|
||||
|
||||
```json
|
||||
@ -8,6 +18,7 @@
|
||||
"geoip": {},
|
||||
"geosite": {},
|
||||
"rules": [],
|
||||
"rule_set": [],
|
||||
"final": "",
|
||||
"auto_detect_interface": false,
|
||||
"override_android_vpn": false,
|
||||
@ -19,11 +30,20 @@
|
||||
|
||||
### Fields
|
||||
|
||||
| Key | Format |
|
||||
|------------|------------------------------------|
|
||||
| `geoip` | [GeoIP](./geoip) |
|
||||
| `geosite` | [Geosite](./geosite) |
|
||||
| `rules` | List of [Route Rule](./rule) |
|
||||
| Key | Format |
|
||||
|-----------|----------------------|
|
||||
| `geoip` | [GeoIP](./geoip) |
|
||||
| `geosite` | [Geosite](./geosite) |
|
||||
|
||||
#### rules
|
||||
|
||||
List of [Route Rule](./rule)
|
||||
|
||||
#### rule_set
|
||||
|
||||
!!! question "Since sing-box 1.8.0"
|
||||
|
||||
List of [Rule Set](/configuration/rule-set)
|
||||
|
||||
#### final
|
||||
|
||||
|
@ -1,5 +1,15 @@
|
||||
---
|
||||
icon: material/alert-decagram
|
||||
---
|
||||
|
||||
# 路由
|
||||
|
||||
!!! quote "sing-box 1.8.0 中的更改"
|
||||
|
||||
:material-plus: [rule_set](#rule_set)
|
||||
:material-delete-clock: [geoip](#geoip)
|
||||
:material-delete-clock: [geosite](#geosite)
|
||||
|
||||
### 结构
|
||||
|
||||
```json
|
||||
@ -7,8 +17,8 @@
|
||||
"route": {
|
||||
"geoip": {},
|
||||
"geosite": {},
|
||||
"ip_rules": [],
|
||||
"rules": [],
|
||||
"rule_set": [],
|
||||
"final": "",
|
||||
"auto_detect_interface": false,
|
||||
"override_android_vpn": false,
|
||||
@ -20,11 +30,21 @@
|
||||
|
||||
### 字段
|
||||
|
||||
| 键 | 格式 |
|
||||
|------------|-------------------------|
|
||||
| `geoip` | [GeoIP](./geoip) |
|
||||
| `geosite` | [GeoSite](./geosite) |
|
||||
| `rules` | 一组 [路由规则](./rule) |
|
||||
| 键 | 格式 |
|
||||
|------------|-----------------------------------|
|
||||
| `geoip` | [GeoIP](./geoip) |
|
||||
| `geosite` | [Geosite](./geosite) |
|
||||
|
||||
|
||||
#### rule
|
||||
|
||||
一组 [路由规则](./rule)。
|
||||
|
||||
#### rule_set
|
||||
|
||||
!!! question "自 sing-box 1.8.0 起"
|
||||
|
||||
一组 [规则集](/configuration/rule-set)。
|
||||
|
||||
#### final
|
||||
|
||||
|
@ -1,3 +1,17 @@
|
||||
---
|
||||
icon: material/alert-decagram
|
||||
---
|
||||
|
||||
!!! quote "Changes in sing-box 1.8.0"
|
||||
|
||||
:material-plus: [rule_set](#rule_set)
|
||||
:material-plus: [rule_set_ipcidr_match_source](#rule_set_ipcidr_match_source)
|
||||
:material-plus: [source_ip_is_private](#source_ip_is_private)
|
||||
:material-plus: [ip_is_private](#ip_is_private)
|
||||
:material-delete-clock: [source_geoip](#source_geoip)
|
||||
:material-delete-clock: [geoip](#geoip)
|
||||
:material-delete-clock: [geosite](#geosite)
|
||||
|
||||
### Structure
|
||||
|
||||
```json
|
||||
@ -46,10 +60,12 @@
|
||||
"10.0.0.0/24",
|
||||
"192.168.0.1"
|
||||
],
|
||||
"source_ip_is_private": false,
|
||||
"ip_cidr": [
|
||||
"10.0.0.0/24",
|
||||
"192.168.0.1"
|
||||
],
|
||||
"ip_is_private": false,
|
||||
"source_port": [
|
||||
12345
|
||||
],
|
||||
@ -89,6 +105,10 @@
|
||||
"wifi_bssid": [
|
||||
"00:00:00:00:00:00"
|
||||
],
|
||||
"rule_set": [
|
||||
"geoip-cn",
|
||||
"geosite-cn"
|
||||
],
|
||||
"invert": false,
|
||||
"outbound": "direct"
|
||||
},
|
||||
@ -160,23 +180,47 @@ Match domain using regular expression.
|
||||
|
||||
#### geosite
|
||||
|
||||
!!! failure "Deprecated in sing-box 1.8.0"
|
||||
|
||||
Geosite is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geosite-to-rule-sets).
|
||||
|
||||
Match geosite.
|
||||
|
||||
#### source_geoip
|
||||
|
||||
!!! failure "Deprecated in sing-box 1.8.0"
|
||||
|
||||
GeoIP is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geoip-to-rule-sets).
|
||||
|
||||
Match source geoip.
|
||||
|
||||
#### geoip
|
||||
|
||||
!!! failure "Deprecated in sing-box 1.8.0"
|
||||
|
||||
GeoIP is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geoip-to-rule-sets).
|
||||
|
||||
Match geoip.
|
||||
|
||||
#### source_ip_cidr
|
||||
|
||||
Match source ip cidr.
|
||||
Match source IP CIDR.
|
||||
|
||||
#### ip_is_private
|
||||
|
||||
!!! question "Since sing-box 1.8.0"
|
||||
|
||||
Match non-public IP.
|
||||
|
||||
#### ip_cidr
|
||||
|
||||
Match ip cidr.
|
||||
Match IP CIDR.
|
||||
|
||||
#### source_ip_is_private
|
||||
|
||||
!!! question "Since sing-box 1.8.0"
|
||||
|
||||
Match non-public source IP.
|
||||
|
||||
#### source_port
|
||||
|
||||
@ -250,6 +294,18 @@ Match WiFi SSID.
|
||||
|
||||
Match WiFi BSSID.
|
||||
|
||||
#### rule_set
|
||||
|
||||
!!! question "Since sing-box 1.8.0"
|
||||
|
||||
Match [Rule Set](/configuration/route/#rule_set).
|
||||
|
||||
#### rule_set_ipcidr_match_source
|
||||
|
||||
!!! question "Since sing-box 1.8.0"
|
||||
|
||||
Make `ipcidr` in rule sets match the source IP.
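A sketch of a route rule that matches connections whose source address is listed in a GeoIP rule set (the `geoip-us` tag and `block` outbound are placeholders mirroring the migration guide):

```json
{
  "rule_set": "geoip-us",
  "rule_set_ipcidr_match_source": true,
  "outbound": "block"
}
```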
|
||||
|
||||
#### invert
|
||||
|
||||
Invert match result.
|
||||
@ -276,4 +332,4 @@ Tag of the target outbound.
|
||||
|
||||
==Required==
|
||||
|
||||
Included default rules.
|
||||
Included rules.
|
||||
|
@ -1,3 +1,17 @@
|
||||
---
|
||||
icon: material/alert-decagram
|
||||
---
|
||||
|
||||
!!! quote "sing-box 1.8.0 中的更改"
|
||||
|
||||
:material-plus: [rule_set](#rule_set)
|
||||
:material-plus: [rule_set_ipcidr_match_source](#rule_set_ipcidr_match_source)
|
||||
:material-plus: [source_ip_is_private](#source_ip_is_private)
|
||||
:material-plus: [ip_is_private](#ip_is_private)
|
||||
:material-delete-clock: [source_geoip](#source_geoip)
|
||||
:material-delete-clock: [geoip](#geoip)
|
||||
:material-delete-clock: [geosite](#geosite)
|
||||
|
||||
### 结构
|
||||
|
||||
```json
|
||||
@ -45,9 +59,11 @@
|
||||
"source_ip_cidr": [
|
||||
"10.0.0.0/24"
|
||||
],
|
||||
"source_ip_is_private": false,
|
||||
"ip_cidr": [
|
||||
"10.0.0.0/24"
|
||||
],
|
||||
"ip_is_private": false,
|
||||
"source_port": [
|
||||
12345
|
||||
],
|
||||
@ -87,6 +103,10 @@
|
||||
"wifi_bssid": [
|
||||
"00:00:00:00:00:00"
|
||||
],
|
||||
"rule_set": [
|
||||
"geoip-cn",
|
||||
"geosite-cn"
|
||||
],
|
||||
"invert": false,
|
||||
"outbound": "direct"
|
||||
},
|
||||
@ -158,24 +178,48 @@
|
||||
|
||||
#### geosite
|
||||
|
||||
匹配 GeoSite。
|
||||
!!! failure "已在 sing-box 1.8.0 废弃"
|
||||
|
||||
Geosite 已废弃且可能在不久的将来移除,参阅 [迁移指南](/migration/#migrate-geosite-to-rule-sets)。
|
||||
|
||||
匹配 Geosite。
|
||||
|
||||
#### source_geoip
|
||||
|
||||
!!! failure "已在 sing-box 1.8.0 废弃"
|
||||
|
||||
GeoIP 已废弃且可能在不久的将来移除,参阅 [迁移指南](/migration/#migrate-geoip-to-rule-sets)。
|
||||
|
||||
匹配源 GeoIP。
|
||||
|
||||
#### geoip
|
||||
|
||||
!!! failure "已在 sing-box 1.8.0 废弃"
|
||||
|
||||
GeoIP 已废弃且可能在不久的将来移除,参阅 [迁移指南](/migration/#migrate-geoip-to-rule-sets)。
|
||||
|
||||
匹配 GeoIP。
|
||||
|
||||
#### source_ip_cidr
|
||||
|
||||
匹配源 IP CIDR。
|
||||
|
||||
#### source_ip_is_private
|
||||
|
||||
!!! question "自 sing-box 1.8.0 起"
|
||||
|
||||
匹配非公开源 IP。
|
||||
|
||||
#### ip_cidr
|
||||
|
||||
匹配 IP CIDR。
|
||||
|
||||
#### ip_is_private
|
||||
|
||||
!!! question "自 sing-box 1.8.0 起"
|
||||
|
||||
匹配非公开 IP。
|
||||
|
||||
#### source_port
|
||||
|
||||
匹配源端口。
|
||||
@ -248,6 +292,18 @@
|
||||
|
||||
匹配 WiFi BSSID。
|
||||
|
||||
#### rule_set
|
||||
|
||||
!!! question "自 sing-box 1.8.0 起"
|
||||
|
||||
匹配[规则集](/zh/configuration/route/#rule_set)。
|
||||
|
||||
#### rule_set_ipcidr_match_source
|
||||
|
||||
!!! question "自 sing-box 1.8.0 起"
|
||||
|
||||
使规则集中的 `ipcidr` 规则匹配源 IP。
|
||||
|
||||
#### invert
|
||||
|
||||
反选匹配结果。
|
||||
@ -274,4 +330,4 @@
|
||||
|
||||
==必填==
|
||||
|
||||
包括的默认规则。
|
||||
包括的规则。
|
207
docs/configuration/rule-set/headless-rule.md
Normal file
@ -0,0 +1,207 @@
|
||||
---
|
||||
icon: material/new-box
|
||||
---
|
||||
|
||||
### Structure
|
||||
|
||||
!!! question "Since sing-box 1.8.0"
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": [
|
||||
{
|
||||
"query_type": [
|
||||
"A",
|
||||
"HTTPS",
|
||||
32768
|
||||
],
|
||||
"network": [
|
||||
"tcp"
|
||||
],
|
||||
"domain": [
|
||||
"test.com"
|
||||
],
|
||||
"domain_suffix": [
|
||||
".cn"
|
||||
],
|
||||
"domain_keyword": [
|
||||
"test"
|
||||
],
|
||||
"domain_regex": [
|
||||
"^stun\\..+"
|
||||
],
|
||||
"source_ip_cidr": [
|
||||
"10.0.0.0/24",
|
||||
"192.168.0.1"
|
||||
],
|
||||
"ip_cidr": [
|
||||
"10.0.0.0/24",
|
||||
"192.168.0.1"
|
||||
],
|
||||
"source_port": [
|
||||
12345
|
||||
],
|
||||
"source_port_range": [
|
||||
"1000:2000",
|
||||
":3000",
|
||||
"4000:"
|
||||
],
|
||||
"port": [
|
||||
80,
|
||||
443
|
||||
],
|
||||
"port_range": [
|
||||
"1000:2000",
|
||||
":3000",
|
||||
"4000:"
|
||||
],
|
||||
"process_name": [
|
||||
"curl"
|
||||
],
|
||||
"process_path": [
|
||||
"/usr/bin/curl"
|
||||
],
|
||||
"package_name": [
|
||||
"com.termux"
|
||||
],
|
||||
"wifi_ssid": [
|
||||
"My WIFI"
|
||||
],
|
||||
"wifi_bssid": [
|
||||
"00:00:00:00:00:00"
|
||||
],
|
||||
"invert": false
|
||||
},
|
||||
{
|
||||
"type": "logical",
|
||||
"mode": "and",
|
||||
"rules": [],
|
||||
"invert": false
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
!!! note ""
|
||||
|
||||
You can omit the JSON array `[]` when there is only one item.
|
||||
|
||||
### Default Fields
|
||||
|
||||
!!! note ""
|
||||
|
||||
The default rule uses the following matching logic:
|
||||
(`domain` || `domain_suffix` || `domain_keyword` || `domain_regex` || `ip_cidr`) &&
|
||||
(`port` || `port_range`) &&
|
||||
(`source_port` || `source_port_range`) &&
|
||||
`other fields`
|
||||
|
||||
#### query_type
|
||||
|
||||
DNS query type. Values can be integers or type name strings.
|
||||
|
||||
#### network
|
||||
|
||||
`tcp` or `udp`.
|
||||
|
||||
#### domain
|
||||
|
||||
Match full domain.
|
||||
|
||||
#### domain_suffix
|
||||
|
||||
Match domain suffix.
|
||||
|
||||
#### domain_keyword
|
||||
|
||||
Match domain using keyword.
|
||||
|
||||
#### domain_regex
|
||||
|
||||
Match domain using regular expression.
|
||||
|
||||
#### source_ip_cidr
|
||||
|
||||
Match source IP CIDR.
|
||||
|
||||
#### ip_cidr
|
||||
|
||||
!!! info ""
|
||||
|
||||
`ip_cidr` is an alias for `source_ip_cidr` when the rule set is used in DNS rules, or when `rule_set_ipcidr_match_source` is enabled in route rules.
|
||||
|
||||
Match IP CIDR.
|
||||
|
||||
#### source_port
|
||||
|
||||
Match source port.
|
||||
|
||||
#### source_port_range
|
||||
|
||||
Match source port range.
|
||||
|
||||
#### port
|
||||
|
||||
Match port.
|
||||
|
||||
#### port_range
|
||||
|
||||
Match port range.
|
||||
|
||||
#### process_name
|
||||
|
||||
!!! quote ""
|
||||
|
||||
Only supported on Linux, Windows, and macOS.
|
||||
|
||||
Match process name.
|
||||
|
||||
#### process_path
|
||||
|
||||
!!! quote ""
|
||||
|
||||
Only supported on Linux, Windows, and macOS.
|
||||
|
||||
Match process path.
|
||||
|
||||
#### package_name
|
||||
|
||||
Match Android package name.
|
||||
|
||||
#### wifi_ssid
|
||||
|
||||
!!! quote ""
|
||||
|
||||
Only supported in graphical clients on Android and iOS.
|
||||
|
||||
Match WiFi SSID.
|
||||
|
||||
#### wifi_bssid
|
||||
|
||||
!!! quote ""
|
||||
|
||||
Only supported in graphical clients on Android and iOS.
|
||||
|
||||
Match WiFi BSSID.
|
||||
|
||||
#### invert
|
||||
|
||||
Invert match result.
|
||||
|
||||
### Logical Fields
|
||||
|
||||
#### type
|
||||
|
||||
`logical`
|
||||
|
||||
#### mode
|
||||
|
||||
==Required==
|
||||
|
||||
`and` or `or`
|
||||
|
||||
#### rules
|
||||
|
||||
==Required==
|
||||
|
||||
Included rules.
|
97
docs/configuration/rule-set/index.md
Normal file
@ -0,0 +1,97 @@
|
||||
---
|
||||
icon: material/new-box
|
||||
---
|
||||
|
||||
# Rule Set
|
||||
|
||||
!!! question "Since sing-box 1.8.0"
|
||||
|
||||
### Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "",
|
||||
"tag": "",
|
||||
"format": "",
|
||||
|
||||
... // Typed Fields
|
||||
}
|
||||
```
|
||||
|
||||
#### Local Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "local",
|
||||
|
||||
...
|
||||
|
||||
"path": ""
|
||||
}
|
||||
```
|
||||
|
||||
#### Remote Structure
|
||||
|
||||
!!! info ""
|
||||
|
||||
Remote rule sets will be cached if `experimental.cache_file.enabled` is set.
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "remote",
|
||||
|
||||
...,
|
||||
|
||||
"url": "",
|
||||
"download_detour": "",
|
||||
"update_interval": ""
|
||||
}
|
||||
```
|
||||
|
||||
### Fields
|
||||
|
||||
#### type
|
||||
|
||||
==Required==
|
||||
|
||||
Type of Rule Set, `local` or `remote`.
|
||||
|
||||
#### tag
|
||||
|
||||
==Required==
|
||||
|
||||
Tag of Rule Set.
|
||||
|
||||
#### format
|
||||
|
||||
==Required==
|
||||
|
||||
Format of Rule Set, `source` or `binary`.
|
||||
|
||||
### Local Fields
|
||||
|
||||
#### path
|
||||
|
||||
==Required==
|
||||
|
||||
File path of Rule Set.
|
||||
|
||||
### Remote Fields
|
||||
|
||||
#### url
|
||||
|
||||
==Required==
|
||||
|
||||
Download URL of Rule Set.
|
||||
|
||||
#### download_detour
|
||||
|
||||
Tag of the outbound to download the rule set.
|
||||
|
||||
Default outbound will be used if empty.
|
||||
|
||||
#### update_interval
|
||||
|
||||
Update interval of Rule Set.
|
||||
|
||||
`1d` will be used if empty.
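Putting the fields together, a hedged sketch of one local and one remote rule set entry (the tags, path, URL and detour are placeholders):

```json
[
  {
    "type": "local",
    "tag": "my-rules",
    "format": "source",
    "path": "my-rules.json"
  },
  {
    "type": "remote",
    "tag": "geosite-cn",
    "format": "binary",
    "url": "https://raw.githubusercontent.com/SagerNet/sing-geosite/rule-set/geosite-cn.srs",
    "download_detour": "proxy",
    "update_interval": "1d"
  }
]
```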
|
34
docs/configuration/rule-set/source-format.md
Normal file
@ -0,0 +1,34 @@
|
||||
---
|
||||
icon: material/new-box
|
||||
---
|
||||
|
||||
# Source Format
|
||||
|
||||
!!! question "Since sing-box 1.8.0"
|
||||
|
||||
### Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 1,
|
||||
"rules": []
|
||||
}
|
||||
```
|
||||
|
||||
### Compile
|
||||
|
||||
Use `sing-box rule-set compile [--output <file-name>.srs] <file-name>.json` to compile source to binary rule-set.
|
||||
|
||||
### Fields
|
||||
|
||||
#### version
|
||||
|
||||
==Required==
|
||||
|
||||
Version of Rule Set, must be `1`.
|
||||
|
||||
#### rules
|
||||
|
||||
==Required==
|
||||
|
||||
List of [Headless Rule](./headless-rule.md).
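A minimal source rule set that could then be compiled with the command above (the domain suffix is a placeholder):

```json
{
  "version": 1,
  "rules": [
    {
      "domain_suffix": [
        ".cn"
      ]
    }
  ]
}
```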
|
@ -343,6 +343,83 @@ flowchart TB
|
||||
}
|
||||
```
|
||||
|
||||
=== ":material-dns: DNS rules (1.8.0+)"
|
||||
|
||||
!!! info
|
||||
|
||||
DNS rules are optional if FakeIP is used.
|
||||
|
||||
```json
|
||||
{
|
||||
"dns": {
|
||||
"servers": [
|
||||
{
|
||||
"tag": "google",
|
||||
"address": "tls://8.8.8.8"
|
||||
},
|
||||
{
|
||||
"tag": "local",
|
||||
"address": "223.5.5.5",
|
||||
"detour": "direct"
|
||||
}
|
||||
],
|
||||
"rules": [
|
||||
{
|
||||
"outbound": "any",
|
||||
"server": "local"
|
||||
},
|
||||
{
|
||||
"clash_mode": "Direct",
|
||||
"server": "local"
|
||||
},
|
||||
{
|
||||
"clash_mode": "Global",
|
||||
"server": "google"
|
||||
},
|
||||
{
|
||||
"type": "logical",
|
||||
"mode": "and",
|
||||
"rules": [
|
||||
{
|
||||
"rule_set": "geosite-geolocation-!cn",
|
||||
"invert": true
|
||||
},
|
||||
{
|
||||
"rule_set": [
|
||||
"geosite-cn",
|
||||
"geosite-category-companies@cn"
|
||||
]
|
||||
}
|
||||
],
|
||||
"server": "local"
|
||||
}
|
||||
]
|
||||
},
|
||||
"route": {
|
||||
"rule_set": [
|
||||
{
|
||||
"type": "remote",
|
||||
"tag": "geosite-cn",
|
||||
"format": "binary",
|
||||
"url": "https://raw.githubusercontent.com/SagerNet/sing-geosite/rule-set/geosite-cn.srs"
|
||||
},
|
||||
{
|
||||
"type": "remote",
|
||||
"tag": "geosite-geolocation-!cn",
|
||||
"format": "binary",
|
||||
"url": "https://raw.githubusercontent.com/SagerNet/sing-geosite/rule-set/geosite-geolocation-!cn.srs"
|
||||
},
|
||||
{
|
||||
"type": "remote",
|
||||
"tag": "geosite-category-companies@cn",
|
||||
"format": "binary",
|
||||
"url": "https://raw.githubusercontent.com/SagerNet/sing-geosite/rule-set/geosite-category-companies@cn.srs"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
=== ":material-router-network: Route rules"
|
||||
|
||||
```json
|
||||
@ -422,4 +499,111 @@ flowchart TB
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
=== ":material-router-network: Route rules (1.8.0+)"
|
||||
|
||||
```json
|
||||
{
|
||||
"outbounds": [
|
||||
{
|
||||
"type": "direct",
|
||||
"tag": "direct"
|
||||
},
|
||||
{
|
||||
"type": "block",
|
||||
"tag": "block"
|
||||
}
|
||||
],
|
||||
"route": {
|
||||
"rules": [
|
||||
{
|
||||
"type": "logical",
|
||||
"mode": "or",
|
||||
"rules": [
|
||||
{
|
||||
"protocol": "dns"
|
||||
},
|
||||
{
|
||||
"port": 53
|
||||
}
|
||||
],
|
||||
"outbound": "dns"
|
||||
},
|
||||
{
|
||||
"ip_is_private": true,
|
||||
"outbound": "direct"
|
||||
},
|
||||
{
|
||||
"clash_mode": "Direct",
|
||||
"outbound": "direct"
|
||||
},
|
||||
{
|
||||
"clash_mode": "Global",
|
||||
"outbound": "default"
|
||||
},
|
||||
{
|
||||
"type": "logical",
|
||||
"mode": "or",
|
||||
"rules": [
|
||||
{
|
||||
"port": 853
|
||||
},
|
||||
{
|
||||
"network": "udp",
|
||||
"port": 443
|
||||
},
|
||||
{
|
||||
"protocol": "stun"
|
||||
}
|
||||
],
|
||||
"outbound": "block"
|
||||
},
|
||||
{
|
||||
"type": "logical",
|
||||
"mode": "and",
|
||||
"rules": [
|
||||
{
|
||||
"rule_set": "geosite-geolocation-!cn",
|
||||
"invert": true
|
||||
},
|
||||
{
|
||||
"rule_set": [
|
||||
"geoip-cn",
|
||||
"geosite-cn",
|
||||
"geosite-category-companies@cn"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outbound": "direct"
|
||||
}
|
||||
],
|
||||
"rule_set": [
|
||||
{
|
||||
"type": "remote",
|
||||
"tag": "geoip-cn",
|
||||
"format": "binary",
|
||||
"url": "https://raw.githubusercontent.com/SagerNet/sing-geoip/rule-set/geoip-cn.srs"
|
||||
},
|
||||
{
|
||||
"type": "remote",
|
||||
"tag": "geosite-cn",
|
||||
"format": "binary",
|
||||
"url": "https://raw.githubusercontent.com/SagerNet/sing-geosite/rule-set/geosite-cn.srs"
|
||||
},
|
||||
{
|
||||
"type": "remote",
|
||||
"tag": "geosite-geolocation-!cn",
|
||||
"format": "binary",
|
||||
"url": "https://raw.githubusercontent.com/SagerNet/sing-geosite/rule-set/geosite-geolocation-!cn.srs"
|
||||
},
|
||||
{
|
||||
"type": "remote",
|
||||
"tag": "geosite-category-companies@cn",
|
||||
"format": "binary",
|
||||
"url": "https://raw.githubusercontent.com/SagerNet/sing-geosite/rule-set/geosite-category-companies@cn.srs"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
195
docs/migration.md
Normal file
@ -0,0 +1,195 @@
|
||||
---
|
||||
icon: material/arrange-bring-forward
|
||||
---
|
||||
|
||||
# Migration
|
||||
|
||||
## 1.8.0
|
||||
|
||||
!!! warning "Unstable"
|
||||
|
||||
This version is still under development, and the following migration guide may be changed in the future.
|
||||
|
||||
### :material-close-box: Migrate cache file from Clash API to independent options
|
||||
|
||||
!!! info "Reference"
|
||||
|
||||
[Clash API](/configuration/experimental/clash-api) /
|
||||
[Cache File](/configuration/experimental/cache-file)
|
||||
|
||||
=== ":material-card-remove: Deprecated"
|
||||
|
||||
```json
|
||||
{
|
||||
"experimental": {
|
||||
"clash_api": {
|
||||
"cache_file": "cache.db", // default value
|
||||
"cahce_id": "my_profile2",
|
||||
"store_mode": true,
|
||||
"store_selected": true,
|
||||
"store_fakeip": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
=== ":material-card-multiple: New"
|
||||
|
||||
```json
|
||||
{
|
||||
"experimental" : {
|
||||
"cache_file": {
|
||||
"enabled": true,
|
||||
"path": "cache.db", // default value
|
||||
"cache_id": "my_profile2",
|
||||
"store_fakeip": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### :material-checkbox-intermediate: Migrate GeoIP to rule sets
|
||||
|
||||
!!! info "Reference"
|
||||
|
||||
[GeoIP](/configuration/route/geoip) /
|
||||
[Route](/configuration/route) /
|
||||
[Route Rule](/configuration/route/rule) /
|
||||
[DNS Rule](/configuration/dns/rule) /
|
||||
[Rule Set](/configuration/rule-set)
|
||||
|
||||
!!! tip
|
||||
|
||||
`sing-box geoip` commands can help you convert custom GeoIP into rule sets.
|
||||
|
||||
=== ":material-card-remove: Deprecated"
|
||||
|
||||
```json
|
||||
{
|
||||
"route": {
|
||||
"rules": [
|
||||
{
|
||||
"geoip": "private",
|
||||
"outbound": "direct"
|
||||
},
|
||||
{
|
||||
"geoip": "cn",
|
||||
"outbound": "direct"
|
||||
},
|
||||
{
|
||||
"source_geoip": "cn",
|
||||
"outbound": "block"
|
||||
}
|
||||
],
|
||||
"geoip": {
|
||||
"download_detour": "proxy"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
=== ":material-card-multiple: New"
|
||||
|
||||
```json
|
||||
{
|
||||
"route": {
|
||||
"rules": [
|
||||
{
|
||||
"ip_is_private": true,
|
||||
"outbound": "direct"
|
||||
},
|
||||
{
|
||||
"rule_set": "geoip-cn",
|
||||
"outbound": "direct"
|
||||
},
|
||||
{
|
||||
"rule_set": "geoip-us",
|
||||
"rule_set_ipcidr_match_source": true,
|
||||
"outbound": "block"
|
||||
}
|
||||
],
|
||||
"rule_set": [
|
||||
{
|
||||
"tag": "geoip-cn",
|
||||
"type": "remote",
|
||||
"format": "binary",
|
||||
"url": "https://raw.githubusercontent.com/SagerNet/sing-geoip/rule-set/geoip-cn.srs",
|
||||
"download_detour": "proxy"
|
||||
},
|
||||
{
|
||||
"tag": "geoip-us",
|
||||
"type": "remote",
|
||||
"format": "binary",
|
||||
"url": "https://raw.githubusercontent.com/SagerNet/sing-geoip/rule-set/geoip-us.srs",
|
||||
"download_detour": "proxy"
|
||||
}
|
||||
]
|
||||
},
|
||||
"experimental": {
|
||||
"cache_file": {
|
||||
"enabled": true // required to save Rule Set cache
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### :material-checkbox-intermediate: Migrate Geosite to rule sets
|
||||
|
||||
!!! info "Reference"
|
||||
|
||||
[Geosite](/configuration/route/geosite) /
|
||||
[Route](/configuration/route) /
|
||||
[Route Rule](/configuration/route/rule) /
|
||||
[DNS Rule](/configuration/dns/rule) /
|
||||
[Rule Set](/configuration/rule-set)
|
||||
|
||||
!!! tip
|
||||
|
||||
`sing-box geosite` commands can help you convert custom Geosite into rule sets.
|
||||
|
||||
=== ":material-card-remove: Deprecated"
|
||||
|
||||
```json
|
||||
{
|
||||
"route": {
|
||||
"rules": [
|
||||
{
|
||||
"geosite": "cn",
|
||||
"outbound": "direct"
|
||||
}
|
||||
],
|
||||
"geosite": {
|
||||
"download_detour": "proxy"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
=== ":material-card-multiple: New"
|
||||
|
||||
```json
|
||||
{
|
||||
"route": {
|
||||
"rules": [
|
||||
{
|
||||
"rule_set": "geosite-cn",
|
||||
"outbound": "direct"
|
||||
}
|
||||
],
|
||||
"rule_set": [
|
||||
{
|
||||
"tag": "geosite-cn",
|
||||
"type": "remote",
|
||||
"format": "binary",
|
||||
"url": "https://raw.githubusercontent.com/SagerNet/sing-geosite/rule-set/geosite-cn.srs",
|
||||
"download_detour": "proxy"
|
||||
}
|
||||
]
|
||||
},
|
||||
"experimental": {
|
||||
"cache_file": {
|
||||
"enabled": true // required to save Rule Set cache
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
@ -12,6 +12,7 @@ import (
|
||||
"github.com/sagernet/bbolt"
|
||||
bboltErrors "github.com/sagernet/bbolt/errors"
|
||||
"github.com/sagernet/sing-box/adapter"
|
||||
"github.com/sagernet/sing-box/option"
|
||||
"github.com/sagernet/sing/common"
|
||||
E "github.com/sagernet/sing/common/exceptions"
|
||||
"github.com/sagernet/sing/service/filemanager"
|
||||
@ -21,21 +22,27 @@ var (
|
||||
bucketSelected = []byte("selected")
|
||||
bucketExpand = []byte("group_expand")
|
||||
bucketMode = []byte("clash_mode")
|
||||
bucketRuleSet = []byte("rule_set")
|
||||
|
||||
bucketNameList = []string{
|
||||
string(bucketSelected),
|
||||
string(bucketExpand),
|
||||
string(bucketMode),
|
||||
string(bucketRuleSet),
|
||||
}
|
||||
|
||||
cacheIDDefault = []byte("default")
|
||||
)
|
||||
|
||||
var _ adapter.ClashCacheFile = (*CacheFile)(nil)
|
||||
var _ adapter.CacheFile = (*CacheFile)(nil)
|
||||
|
||||
type CacheFile struct {
|
||||
ctx context.Context
|
||||
path string
|
||||
cacheID []byte
|
||||
storeFakeIP bool
|
||||
|
||||
DB *bbolt.DB
|
||||
cacheID []byte
|
||||
saveAccess sync.RWMutex
|
||||
saveDomain map[netip.Addr]string
|
||||
saveAddress4 map[string]netip.Addr
|
||||
@ -43,7 +50,29 @@ type CacheFile struct {
|
||||
saveMetadataTimer *time.Timer
|
||||
}
|
||||
|
||||
func Open(ctx context.Context, path string, cacheID string) (*CacheFile, error) {
|
||||
func NewCacheFile(ctx context.Context, options option.CacheFileOptions) *CacheFile {
|
||||
var path string
|
||||
if options.Path != "" {
|
||||
path = options.Path
|
||||
} else {
|
||||
path = "cache.db"
|
||||
}
|
||||
var cacheIDBytes []byte
|
||||
if options.CacheID != "" {
|
||||
cacheIDBytes = append([]byte{0}, []byte(options.CacheID)...)
|
||||
}
|
||||
return &CacheFile{
|
||||
ctx: ctx,
|
||||
path: filemanager.BasePath(ctx, path),
|
||||
cacheID: cacheIDBytes,
|
||||
storeFakeIP: options.StoreFakeIP,
|
||||
saveDomain: make(map[netip.Addr]string),
|
||||
saveAddress4: make(map[string]netip.Addr),
|
||||
saveAddress6: make(map[string]netip.Addr),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *CacheFile) start() error {
|
||||
const fileMode = 0o666
|
||||
options := bbolt.Options{Timeout: time.Second}
|
||||
var (
|
||||
@ -51,7 +80,7 @@ func Open(ctx context.Context, path string, cacheID string) (*CacheFile, error)
|
||||
err error
|
||||
)
|
||||
for i := 0; i < 10; i++ {
|
||||
db, err = bbolt.Open(path, fileMode, &options)
|
||||
db, err = bbolt.Open(c.path, fileMode, &options)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
@ -59,23 +88,20 @@ func Open(ctx context.Context, path string, cacheID string) (*CacheFile, error)
|
||||
continue
|
||||
}
|
||||
if E.IsMulti(err, bboltErrors.ErrInvalid, bboltErrors.ErrChecksum, bboltErrors.ErrVersionMismatch) {
|
||||
rmErr := os.Remove(path)
|
||||
rmErr := os.Remove(c.path)
|
||||
if rmErr != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
err = filemanager.Chown(ctx, path)
|
||||
err = filemanager.Chown(c.ctx, c.path)
|
||||
if err != nil {
|
||||
return nil, E.Cause(err, "platform chown")
|
||||
}
|
||||
var cacheIDBytes []byte
|
||||
if cacheID != "" {
|
||||
cacheIDBytes = append([]byte{0}, []byte(cacheID)...)
|
||||
db.Close()
|
||||
return E.Cause(err, "platform chown")
|
||||
}
|
||||
err = db.Batch(func(tx *bbolt.Tx) error {
|
||||
return tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
|
||||
@ -97,15 +123,30 @@ func Open(ctx context.Context, path string, cacheID string) (*CacheFile, error)
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
db.Close()
|
||||
return err
|
||||
}
|
||||
return &CacheFile{
|
||||
DB: db,
|
||||
cacheID: cacheIDBytes,
|
||||
saveDomain: make(map[netip.Addr]string),
|
||||
saveAddress4: make(map[string]netip.Addr),
|
||||
saveAddress6: make(map[string]netip.Addr),
|
||||
}, nil
|
||||
c.DB = db
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CacheFile) PreStart() error {
|
||||
return c.start()
|
||||
}
|
||||
|
||||
func (c *CacheFile) Start() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CacheFile) Close() error {
|
||||
if c.DB == nil {
|
||||
return nil
|
||||
}
|
||||
return c.DB.Close()
|
||||
}
|
||||
|
||||
func (c *CacheFile) StoreFakeIP() bool {
|
||||
return c.storeFakeIP
|
||||
}
|
||||
|
||||
func (c *CacheFile) LoadMode() string {
|
||||
@ -219,6 +260,35 @@ func (c *CacheFile) StoreGroupExpand(group string, isExpand bool) error {
|
||||
})
|
||||
}
|
||||
|
||||
func (c *CacheFile) Close() error {
|
||||
return c.DB.Close()
|
||||
func (c *CacheFile) LoadRuleSet(tag string) *adapter.SavedRuleSet {
|
||||
var savedSet adapter.SavedRuleSet
|
||||
err := c.DB.View(func(t *bbolt.Tx) error {
|
||||
bucket := c.bucket(t, bucketRuleSet)
|
||||
if bucket == nil {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
setBinary := bucket.Get([]byte(tag))
|
||||
if len(setBinary) == 0 {
|
||||
return os.ErrInvalid
|
||||
}
|
||||
return savedSet.UnmarshalBinary(setBinary)
|
||||
})
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return &savedSet
|
||||
}
|
||||
|
||||
func (c *CacheFile) SaveRuleSet(tag string, set *adapter.SavedRuleSet) error {
|
||||
return c.DB.Batch(func(t *bbolt.Tx) error {
|
||||
bucket, err := c.createBucket(t, bucketRuleSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
setBinary, err := set.MarshalBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return bucket.Put([]byte(tag), setBinary)
|
||||
})
|
||||
}
|
@ -25,7 +25,7 @@ func (c *CacheFile) FakeIPMetadata() *adapter.FakeIPMetadata {
|
||||
err := c.DB.Batch(func(tx *bbolt.Tx) error {
|
||||
bucket := tx.Bucket(bucketFakeIP)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
return os.ErrNotExist
|
||||
}
|
||||
metadataBinary := bucket.Get(keyMetadata)
|
||||
if len(metadataBinary) == 0 {
|
@ -5,6 +5,7 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/sagernet/sing-box/adapter"
|
||||
C "github.com/sagernet/sing-box/constant"
|
||||
"github.com/sagernet/sing-box/log"
|
||||
"github.com/sagernet/sing-box/option"
|
||||
"github.com/sagernet/sing/common"
|
||||
@ -27,24 +28,37 @@ func NewClashServer(ctx context.Context, router adapter.Router, logFactory log.O
|
||||
|
||||
func CalculateClashModeList(options option.Options) []string {
|
||||
var clashMode []string
|
||||
for _, dnsRule := range common.PtrValueOrDefault(options.DNS).Rules {
|
||||
if dnsRule.DefaultOptions.ClashMode != "" && !common.Contains(clashMode, dnsRule.DefaultOptions.ClashMode) {
|
||||
clashMode = append(clashMode, dnsRule.DefaultOptions.ClashMode)
|
||||
}
|
||||
for _, defaultRule := range dnsRule.LogicalOptions.Rules {
|
||||
if defaultRule.ClashMode != "" && !common.Contains(clashMode, defaultRule.ClashMode) {
|
||||
clashMode = append(clashMode, defaultRule.ClashMode)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, rule := range common.PtrValueOrDefault(options.Route).Rules {
|
||||
if rule.DefaultOptions.ClashMode != "" && !common.Contains(clashMode, rule.DefaultOptions.ClashMode) {
|
||||
clashMode = append(clashMode, rule.DefaultOptions.ClashMode)
|
||||
}
|
||||
for _, defaultRule := range rule.LogicalOptions.Rules {
|
||||
if defaultRule.ClashMode != "" && !common.Contains(clashMode, defaultRule.ClashMode) {
|
||||
clashMode = append(clashMode, defaultRule.ClashMode)
|
||||
clashMode = append(clashMode, extraClashModeFromRule(common.PtrValueOrDefault(options.Route).Rules)...)
|
||||
clashMode = append(clashMode, extraClashModeFromDNSRule(common.PtrValueOrDefault(options.DNS).Rules)...)
|
||||
clashMode = common.FilterNotDefault(common.Uniq(clashMode))
|
||||
return clashMode
|
||||
}
|
||||
|
||||
func extraClashModeFromRule(rules []option.Rule) []string {
|
||||
var clashMode []string
|
||||
for _, rule := range rules {
|
||||
switch rule.Type {
|
||||
case C.RuleTypeDefault:
|
||||
if rule.DefaultOptions.ClashMode != "" {
|
||||
clashMode = append(clashMode, rule.DefaultOptions.ClashMode)
|
||||
}
|
||||
case C.RuleTypeLogical:
|
||||
clashMode = append(clashMode, extraClashModeFromRule(rule.LogicalOptions.Rules)...)
|
||||
}
|
||||
}
|
||||
return clashMode
|
||||
}
|
||||
|
||||
func extraClashModeFromDNSRule(rules []option.DNSRule) []string {
|
||||
var clashMode []string
|
||||
for _, rule := range rules {
|
||||
switch rule.Type {
|
||||
case C.RuleTypeDefault:
|
||||
if rule.DefaultOptions.ClashMode != "" {
|
||||
clashMode = append(clashMode, rule.DefaultOptions.ClashMode)
|
||||
}
|
||||
case C.RuleTypeLogical:
|
||||
clashMode = append(clashMode, extraClashModeFromDNSRule(rule.LogicalOptions.Rules)...)
|
||||
}
|
||||
}
|
||||
return clashMode
|
||||
|
@ -1,23 +1,26 @@
|
||||
package clashapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/sagernet/sing-box/adapter"
|
||||
"github.com/sagernet/sing/service"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/render"
|
||||
)
|
||||
|
||||
func cacheRouter(router adapter.Router) http.Handler {
|
||||
func cacheRouter(ctx context.Context) http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Post("/fakeip/flush", flushFakeip(router))
|
||||
r.Post("/fakeip/flush", flushFakeip(ctx))
|
||||
return r
|
||||
}
|
||||
|
||||
func flushFakeip(router adapter.Router) func(w http.ResponseWriter, r *http.Request) {
|
||||
func flushFakeip(ctx context.Context) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if cacheFile := router.ClashServer().CacheFile(); cacheFile != nil {
|
||||
cacheFile := service.FromContext[adapter.CacheFile](ctx)
|
||||
if cacheFile != nil {
|
||||
err := cacheFile.FakeIPReset()
|
||||
if err != nil {
|
||||
render.Status(r, http.StatusInternalServerError)
|
||||
|
@ -100,8 +100,10 @@ func getProxies(server *Server, router adapter.Router) func(w http.ResponseWrite
|
||||
allProxies = append(allProxies, detour.Tag())
|
||||
}
|
||||
|
||||
defaultTag := router.DefaultOutbound(N.NetworkTCP).Tag()
|
||||
if defaultTag == "" {
|
||||
var defaultTag string
|
||||
if defaultOutbound, err := router.DefaultOutbound(N.NetworkTCP); err == nil {
|
||||
defaultTag = defaultOutbound.Tag()
|
||||
} else {
|
||||
defaultTag = allProxies[0]
|
||||
}
|
||||
|
||||
|
@ -15,7 +15,6 @@ import (
|
||||
"github.com/sagernet/sing-box/common/urltest"
|
||||
C "github.com/sagernet/sing-box/constant"
|
||||
"github.com/sagernet/sing-box/experimental"
|
||||
"github.com/sagernet/sing-box/experimental/clashapi/cachefile"
|
||||
"github.com/sagernet/sing-box/experimental/clashapi/trafficontrol"
|
||||
"github.com/sagernet/sing-box/log"
|
||||
"github.com/sagernet/sing-box/option"
|
||||
@ -49,12 +48,6 @@ type Server struct {
|
||||
mode string
|
||||
modeList []string
|
||||
modeUpdateHook chan<- struct{}
|
||||
storeMode bool
|
||||
storeSelected bool
|
||||
storeFakeIP bool
|
||||
cacheFilePath string
|
||||
cacheID string
|
||||
cacheFile adapter.ClashCacheFile
|
||||
|
||||
externalController bool
|
||||
externalUI string
|
||||
@ -76,9 +69,6 @@ func NewServer(ctx context.Context, router adapter.Router, logFactory log.Observ
|
||||
trafficManager: trafficManager,
|
||||
modeList: options.ModeList,
|
||||
externalController: options.ExternalController != "",
|
||||
storeMode: options.StoreMode,
|
||||
storeSelected: options.StoreSelected,
|
||||
storeFakeIP: options.StoreFakeIP,
|
||||
externalUIDownloadURL: options.ExternalUIDownloadURL,
|
||||
externalUIDownloadDetour: options.ExternalUIDownloadDetour,
|
||||
}
|
||||
@ -94,18 +84,10 @@ func NewServer(ctx context.Context, router adapter.Router, logFactory log.Observ
|
||||
server.modeList = append([]string{defaultMode}, server.modeList...)
|
||||
}
|
||||
server.mode = defaultMode
|
||||
if options.StoreMode || options.StoreSelected || options.StoreFakeIP || options.ExternalController == "" {
|
||||
cachePath := os.ExpandEnv(options.CacheFile)
|
||||
if cachePath == "" {
|
||||
cachePath = "cache.db"
|
||||
}
|
||||
if foundPath, loaded := C.FindPath(cachePath); loaded {
|
||||
cachePath = foundPath
|
||||
} else {
|
||||
cachePath = filemanager.BasePath(ctx, cachePath)
|
||||
}
|
||||
server.cacheFilePath = cachePath
|
||||
server.cacheID = options.CacheID
|
||||
//goland:noinspection GoDeprecation
|
||||
//nolint:staticcheck
|
||||
if options.StoreMode || options.StoreSelected || options.StoreFakeIP || options.CacheFile != "" || options.CacheID != "" {
|
||||
return nil, E.New("cache_file and related fields in Clash API is deprecated in sing-box 1.8.0, use experimental.cache_file instead.")
|
||||
}
|
||||
cors := cors.New(cors.Options{
|
||||
AllowedOrigins: []string{"*"},
|
||||
@ -128,7 +110,7 @@ func NewServer(ctx context.Context, router adapter.Router, logFactory log.Observ
|
||||
r.Mount("/providers/rules", ruleProviderRouter())
|
||||
r.Mount("/script", scriptRouter())
|
||||
r.Mount("/profile", profileRouter())
|
||||
r.Mount("/cache", cacheRouter(router))
|
||||
r.Mount("/cache", cacheRouter(ctx))
|
||||
r.Mount("/dns", dnsRouter(router))
|
||||
|
||||
server.setupMetaAPI(r)
|
||||
@ -147,19 +129,13 @@ func NewServer(ctx context.Context, router adapter.Router, logFactory log.Observ
|
||||
}
|
||||
|
||||
func (s *Server) PreStart() error {
|
||||
if s.cacheFilePath != "" {
|
||||
cacheFile, err := cachefile.Open(s.ctx, s.cacheFilePath, s.cacheID)
|
||||
if err != nil {
|
||||
return E.Cause(err, "open cache file")
|
||||
}
|
||||
s.cacheFile = cacheFile
|
||||
if s.storeMode {
|
||||
mode := s.cacheFile.LoadMode()
|
||||
if common.Any(s.modeList, func(it string) bool {
|
||||
return strings.EqualFold(it, mode)
|
||||
}) {
|
||||
s.mode = mode
|
||||
}
|
||||
cacheFile := service.FromContext[adapter.CacheFile](s.ctx)
|
||||
if cacheFile != nil {
|
||||
mode := cacheFile.LoadMode()
|
||||
if common.Any(s.modeList, func(it string) bool {
|
||||
return strings.EqualFold(it, mode)
|
||||
}) {
|
||||
s.mode = mode
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -187,7 +163,6 @@ func (s *Server) Close() error {
|
||||
return common.Close(
|
||||
common.PtrOrNil(s.httpServer),
|
||||
s.trafficManager,
|
||||
s.cacheFile,
|
||||
s.urlTestHistory,
|
||||
)
|
||||
}
|
||||
@ -224,8 +199,9 @@ func (s *Server) SetMode(newMode string) {
|
||||
}
|
||||
}
|
||||
s.router.ClearDNSCache()
|
||||
if s.storeMode {
|
||||
err := s.cacheFile.StoreMode(newMode)
|
||||
cacheFile := service.FromContext[adapter.CacheFile](s.ctx)
|
||||
if cacheFile != nil {
|
||||
err := cacheFile.StoreMode(newMode)
|
||||
if err != nil {
|
||||
s.logger.Error(E.Cause(err, "save mode"))
|
||||
}
|
||||
@ -233,18 +209,6 @@ func (s *Server) SetMode(newMode string) {
|
||||
s.logger.Info("updated mode: ", newMode)
|
||||
}
|
||||
|
||||
func (s *Server) StoreSelected() bool {
|
||||
return s.storeSelected
|
||||
}
|
||||
|
||||
func (s *Server) StoreFakeIP() bool {
|
||||
return s.storeFakeIP
|
||||
}
|
||||
|
||||
func (s *Server) CacheFile() adapter.ClashCacheFile {
|
||||
return s.cacheFile
|
||||
}
|
||||
|
||||
func (s *Server) HistoryStorage() *urltest.HistoryStorage {
|
||||
return s.urlTestHistory
|
||||
}
|
||||
|
@ -51,7 +51,11 @@ func (s *Server) downloadExternalUI() error {
}
detour = outbound
} else {
detour = s.router.DefaultOutbound(N.NetworkTCP)
outbound, err := s.router.DefaultOutbound(N.NetworkTCP)
if err != nil {
return err
}
detour = outbound
}
httpClient := &http.Client{
Transport: &http.Transport{

@ -94,7 +94,9 @@ func NewTCPTracker(conn net.Conn, manager *Manager, metadata Metadata, router ad
var chain []string
var next string
if rule == nil {
next = router.DefaultOutbound(N.NetworkTCP).Tag()
if defaultOutbound, err := router.DefaultOutbound(N.NetworkTCP); err == nil {
next = defaultOutbound.Tag()
}
} else {
next = rule.Outbound()
}
@ -181,7 +183,9 @@ func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata Metadata, route
var chain []string
var next string
if rule == nil {
next = router.DefaultOutbound(N.NetworkUDP).Tag()
if defaultOutbound, err := router.DefaultOutbound(N.NetworkUDP); err == nil {
next = defaultOutbound.Tag()
}
} else {
next = rule.Outbound()
}

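`Router.DefaultOutbound` now returns `(adapter.Outbound, error)` instead of assuming a default always exists, which is why the trackers above only set the chain head when the lookup succeeds. A minimal sketch of the calling convention; `firstHop` is a hypothetical helper, not part of the codebase.

```go
package trafficontrol

import (
	"github.com/sagernet/sing-box/adapter"
	N "github.com/sagernet/sing/common/network"
)

// firstHop resolves the outbound tag a connection starts its chain with:
// the matched rule's outbound if there is one, otherwise the default TCP
// outbound when the router has one configured.
func firstHop(router adapter.Router, rule adapter.Rule) string {
	if rule != nil {
		return rule.Outbound()
	}
	if defaultOutbound, err := router.DefaultOutbound(N.NetworkTCP); err == nil {
		return defaultOutbound.Tag()
	}
	return "" // no default outbound; leave the chain head empty
}
```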
@ -159,11 +159,7 @@ func readGroups(reader io.Reader) (OutboundGroupIterator, error) {

func writeGroups(writer io.Writer, boxService *BoxService) error {
historyStorage := service.PtrFromContext[urltest.HistoryStorage](boxService.ctx)
var cacheFile adapter.ClashCacheFile
if clashServer := boxService.instance.Router().ClashServer(); clashServer != nil {
cacheFile = clashServer.CacheFile()
}

cacheFile := service.FromContext[adapter.CacheFile](boxService.ctx)
outbounds := boxService.instance.Router().Outbounds()
var iGroups []adapter.OutboundGroup
for _, it := range outbounds {
@ -288,16 +284,15 @@ func (s *CommandServer) handleSetGroupExpand(conn net.Conn) error {
if err != nil {
return err
}
service := s.service
if service == nil {
serviceNow := s.service
if serviceNow == nil {
return writeError(conn, E.New("service not ready"))
}
if clashServer := service.instance.Router().ClashServer(); clashServer != nil {
if cacheFile := clashServer.CacheFile(); cacheFile != nil {
err = cacheFile.StoreGroupExpand(groupTag, isExpand)
if err != nil {
return writeError(conn, err)
}
cacheFile := service.FromContext[adapter.CacheFile](serviceNow.ctx)
if cacheFile != nil {
err = cacheFile.StoreGroupExpand(groupTag, isExpand)
if err != nil {
return writeError(conn, err)
}
}
return writeError(conn, nil)

@ -12,6 +12,7 @@ import (
"github.com/sagernet/sing/common/batch"
E "github.com/sagernet/sing/common/exceptions"
"github.com/sagernet/sing/common/rw"
"github.com/sagernet/sing/service"
)

func (c *CommandClient) URLTest(groupTag string) error {
@ -37,11 +38,11 @@ func (s *CommandServer) handleURLTest(conn net.Conn) error {
if err != nil {
return err
}
service := s.service
if service == nil {
serviceNow := s.service
if serviceNow == nil {
return nil
}
abstractOutboundGroup, isLoaded := service.instance.Router().Outbound(groupTag)
abstractOutboundGroup, isLoaded := serviceNow.instance.Router().Outbound(groupTag)
if !isLoaded {
return writeError(conn, E.New("outbound group not found: ", groupTag))
}
@ -53,15 +54,9 @@ func (s *CommandServer) handleURLTest(conn net.Conn) error {
if isURLTest {
go urlTest.CheckOutbounds()
} else {
var historyStorage *urltest.HistoryStorage
if clashServer := service.instance.Router().ClashServer(); clashServer != nil {
historyStorage = clashServer.HistoryStorage()
} else {
return writeError(conn, E.New("Clash API is required for URLTest on non-URLTest group"))
}

historyStorage := service.PtrFromContext[urltest.HistoryStorage](serviceNow.ctx)
outbounds := common.Filter(common.Map(outboundGroup.All(), func(it string) adapter.Outbound {
itOutbound, _ := service.instance.Router().Outbound(it)
itOutbound, _ := serviceNow.instance.Router().Outbound(it)
return itOutbound
}), func(it adapter.Outbound) bool {
if it == nil {
@ -73,12 +68,12 @@ func (s *CommandServer) handleURLTest(conn net.Conn) error {
}
return true
})
b, _ := batch.New(service.ctx, batch.WithConcurrencyNum[any](10))
b, _ := batch.New(serviceNow.ctx, batch.WithConcurrencyNum[any](10))
for _, detour := range outbounds {
outboundToTest := detour
outboundTag := outboundToTest.Tag()
b.Go(outboundTag, func() (any, error) {
t, err := urltest.URLTest(service.ctx, "", outboundToTest)
t, err := urltest.URLTest(serviceNow.ctx, "", outboundToTest)
if err != nil {
historyStorage.DeleteURLTestHistory(outboundTag)
} else {

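The URLTest handler now takes the shared history storage from the service context and fans the tests out through sing's `batch` helper. Below is a reduced sketch of that pattern under the same concurrency limit; the function name is made up, and waiting on the batch (`b.Wait()`) is assumed from the helper's API rather than shown in the hunk.

```go
package libbox

import (
	"context"

	"github.com/sagernet/sing-box/adapter"
	"github.com/sagernet/sing-box/common/urltest"
	"github.com/sagernet/sing/common/batch"
	"github.com/sagernet/sing/service"
)

// testGroupOutbounds URL-tests each outbound with at most ten tests in flight
// and drops stale history entries for outbounds that fail.
func testGroupOutbounds(ctx context.Context, outbounds []adapter.Outbound) error {
	historyStorage := service.PtrFromContext[urltest.HistoryStorage](ctx)
	b, _ := batch.New(ctx, batch.WithConcurrencyNum[any](10))
	for _, detour := range outbounds {
		outboundToTest := detour
		outboundTag := outboundToTest.Tag()
		b.Go(outboundTag, func() (any, error) {
			_, err := urltest.URLTest(ctx, "", outboundToTest)
			if err != nil {
				historyStorage.DeleteURLTestHistory(outboundTag)
			}
			// On success the handler above stores the measured delay into
			// historyStorage; that bookkeeping is elided here.
			return nil, nil
		})
	}
	return b.Wait() // assumed: the batch type exposes a Wait method
}
```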
@ -3,9 +3,9 @@ package libbox
import (
"bytes"
"context"
"encoding/json"

"github.com/sagernet/sing-box"
"github.com/sagernet/sing-box/common/json"
"github.com/sagernet/sing-box/option"
E "github.com/sagernet/sing/common/exceptions"
)

2 go.mod
@ -26,7 +26,7 @@ require (
github.com/sagernet/gvisor v0.0.0-20231119034329-07cfb6aaf930
github.com/sagernet/quic-go v0.40.0
github.com/sagernet/reality v0.0.0-20230406110435-ee17307e7691
github.com/sagernet/sing v0.2.18-0.20231201054122-bca74039ead5
github.com/sagernet/sing v0.2.18-0.20231201060417-575186ed63c2
github.com/sagernet/sing-dns v0.1.11
github.com/sagernet/sing-mux v0.1.5-0.20231109075101-6b086ed6bb07
github.com/sagernet/sing-quic v0.1.5-0.20231123150216-00957d136203

4 go.sum
@ -110,8 +110,8 @@ github.com/sagernet/reality v0.0.0-20230406110435-ee17307e7691 h1:5Th31OC6yj8byL
github.com/sagernet/reality v0.0.0-20230406110435-ee17307e7691/go.mod h1:B8lp4WkQ1PwNnrVMM6KyuFR20pU8jYBD+A4EhJovEXU=
github.com/sagernet/sing v0.0.0-20220817130738-ce854cda8522/go.mod h1:QVsS5L/ZA2Q5UhQwLrn0Trw+msNd/NPGEhBKR/ioWiY=
github.com/sagernet/sing v0.1.8/go.mod h1:jt1w2u7lJQFFSGLiRrRIs5YWmx4kAPfWuOejuDW9qMk=
github.com/sagernet/sing v0.2.18-0.20231201054122-bca74039ead5 h1:luykfsWNqFh9sdLXlkCQtkuzLUPRd3BMsdQJt0REB1g=
github.com/sagernet/sing v0.2.18-0.20231201054122-bca74039ead5/go.mod h1:OL6k2F0vHmEzXz2KW19qQzu172FDgSbUSODylighuVo=
github.com/sagernet/sing v0.2.18-0.20231201060417-575186ed63c2 h1:1ydWkFgLURGlrnwRdjyrpo9lp1g5Qq7XrNBghMntWTs=
github.com/sagernet/sing v0.2.18-0.20231201060417-575186ed63c2/go.mod h1:OL6k2F0vHmEzXz2KW19qQzu172FDgSbUSODylighuVo=
github.com/sagernet/sing-dns v0.1.11 h1:PPrMCVVrAeR3f5X23I+cmvacXJ+kzuyAsBiWyUKhGSE=
github.com/sagernet/sing-dns v0.1.11/go.mod h1:zJ/YjnYB61SYE+ubMcMqVdpaSvsyQ2iShQGO3vuLvvE=
github.com/sagernet/sing-mux v0.1.5-0.20231109075101-6b086ed6bb07 h1:ncKb5tVOsCQgCsv6UpsA0jinbNb5OQ5GMPJlyQP3EHM=

28 mkdocs.yml
@ -32,12 +32,16 @@ theme:
- content.code.copy
- content.code.select
- content.code.annotate
icon:
admonition:
question: material/new-box
nav:
- Home:
- index.md
- Change Log: changelog.md
- Migration: migration.md
- Deprecated: deprecated.md
- Support: support.md
- Change Log: changelog.md
- Installation:
- Package Manager: installation/package-manager.md
- Docker: installation/docker.md
@ -56,7 +60,7 @@ nav:
- Proxy:
- Server: manual/proxy/server.md
- Client: manual/proxy/client.md
# - TUN: manual/proxy/tun.md
# - TUN: manual/proxy/tun.md
- Proxy Protocol:
- Shadowsocks: manual/proxy-protocol/shadowsocks.md
- Trojan: manual/proxy-protocol/trojan.md
@ -79,8 +83,15 @@ nav:
- Geosite: configuration/route/geosite.md
- Route Rule: configuration/route/rule.md
- Protocol Sniff: configuration/route/sniff.md
- Rule Set:
- configuration/rule-set/index.md
- Source Format: configuration/rule-set/source-format.md
- Headless Rule: configuration/rule-set/headless-rule.md
- Experimental:
- configuration/experimental/index.md
- Cache File: configuration/experimental/cache-file.md
- Clash API: configuration/experimental/clash-api.md
- V2Ray API: configuration/experimental/v2ray-api.md
- Shared:
- Listen Fields: configuration/shared/listen.md
- Dial Fields: configuration/shared/dial.md
@ -180,9 +191,10 @@ plugins:
name: 简体中文
nav_translations:
Home: 开始
Change Log: 更新日志
Migration: 迁移指南
Deprecated: 废弃功能列表
Support: 支持
Change Log: 更新日志

Installation: 安装
Package Manager: 包管理器
@ -203,6 +215,10 @@ plugins:
Route Rule: 路由规则
Protocol Sniff: 协议探测

Rule Set: 规则集
Source Format: 源文件格式
Headless Rule: 无头规则

Experimental: 实验性

Shared: 通用
@ -215,10 +231,6 @@ plugins:
Inbound: 入站
Outbound: 出站

FAQ: 常见问题
Known Issues: 已知问题
Examples: 示例
Linux Server Installation: Linux 服务器安装
DNS Hijack: DNS 劫持
Manual: 手册
reconfigure_material: true
reconfigure_search: true

@ -1,31 +0,0 @@
package option

type ClashAPIOptions struct {
ExternalController string `json:"external_controller,omitempty"`
ExternalUI string `json:"external_ui,omitempty"`
ExternalUIDownloadURL string `json:"external_ui_download_url,omitempty"`
ExternalUIDownloadDetour string `json:"external_ui_download_detour,omitempty"`
Secret string `json:"secret,omitempty"`
DefaultMode string `json:"default_mode,omitempty"`
StoreMode bool `json:"store_mode,omitempty"`
StoreSelected bool `json:"store_selected,omitempty"`
StoreFakeIP bool `json:"store_fakeip,omitempty"`
CacheFile string `json:"cache_file,omitempty"`
CacheID string `json:"cache_id,omitempty"`

ModeList []string `json:"-"`
}

type SelectorOutboundOptions struct {
Outbounds []string `json:"outbounds"`
Default string `json:"default,omitempty"`
InterruptExistConnections bool `json:"interrupt_exist_connections,omitempty"`
}

type URLTestOutboundOptions struct {
Outbounds []string `json:"outbounds"`
URL string `json:"url,omitempty"`
Interval Duration `json:"interval,omitempty"`
Tolerance uint16 `json:"tolerance,omitempty"`
InterruptExistConnections bool `json:"interrupt_exist_connections,omitempty"`
}

@ -1,9 +1,8 @@
package option

import (
"encoding/json"

"github.com/sagernet/sing-box/common/humanize"
"github.com/sagernet/sing-box/common/json"
)

type DebugOptions struct {

@ -1,7 +1,48 @@
package option

type ExperimentalOptions struct {
ClashAPI *ClashAPIOptions `json:"clash_api,omitempty"`
V2RayAPI *V2RayAPIOptions `json:"v2ray_api,omitempty"`
Debug *DebugOptions `json:"debug,omitempty"`
CacheFile *CacheFileOptions `json:"cache_file,omitempty"`
ClashAPI *ClashAPIOptions `json:"clash_api,omitempty"`
V2RayAPI *V2RayAPIOptions `json:"v2ray_api,omitempty"`
Debug *DebugOptions `json:"debug,omitempty"`
}

type CacheFileOptions struct {
Enabled bool `json:"enabled,omitempty"`
Path string `json:"path,omitempty"`
CacheID string `json:"cache_id,omitempty"`
StoreFakeIP bool `json:"store_fakeip,omitempty"`
}

type ClashAPIOptions struct {
ExternalController string `json:"external_controller,omitempty"`
ExternalUI string `json:"external_ui,omitempty"`
ExternalUIDownloadURL string `json:"external_ui_download_url,omitempty"`
ExternalUIDownloadDetour string `json:"external_ui_download_detour,omitempty"`
Secret string `json:"secret,omitempty"`
DefaultMode string `json:"default_mode,omitempty"`
ModeList []string `json:"-"`

// Deprecated: migrated to global cache file
CacheFile string `json:"cache_file,omitempty"`
// Deprecated: migrated to global cache file
CacheID string `json:"cache_id,omitempty"`
// Deprecated: migrated to global cache file
StoreMode bool `json:"store_mode,omitempty"`
// Deprecated: migrated to global cache file
StoreSelected bool `json:"store_selected,omitempty"`
// Deprecated: migrated to global cache file
StoreFakeIP bool `json:"store_fakeip,omitempty"`
}

type V2RayAPIOptions struct {
Listen string `json:"listen,omitempty"`
Stats *V2RayStatsServiceOptions `json:"stats,omitempty"`
}

type V2RayStatsServiceOptions struct {
Enabled bool `json:"enabled,omitempty"`
Inbounds []string `json:"inbounds,omitempty"`
Outbounds []string `json:"outbounds,omitempty"`
Users []string `json:"users,omitempty"`
}

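With `CacheFileOptions` promoted to `experimental.cache_file`, the Clash API block keeps only deprecated aliases of the old fields. A hedged sketch of what the migrated configuration looks like when decoded into these structs; paths and addresses are placeholders.

```go
package option_test

import (
	"fmt"

	"github.com/sagernet/sing-box/common/json"
	"github.com/sagernet/sing-box/option"
)

// experimentalConfig mirrors the 1.8.0 layout: cache settings live under
// "cache_file" instead of inside "clash_api".
const experimentalConfig = `{
  "cache_file": {
    "enabled": true,
    "path": "cache.db",
    "store_fakeip": true
  },
  "clash_api": {
    "external_controller": "127.0.0.1:9090",
    "default_mode": "rule"
  }
}`

func ExampleExperimentalOptions() {
	var options option.ExperimentalOptions
	if err := json.Unmarshal([]byte(experimentalConfig), &options); err != nil {
		panic(err)
	}
	fmt.Println(options.CacheFile.Enabled, options.CacheFile.Path, options.ClashAPI.DefaultMode)
	// Output: true cache.db rule
}
```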
15 option/group.go Normal file
@ -0,0 +1,15 @@
package option

type SelectorOutboundOptions struct {
Outbounds []string `json:"outbounds"`
Default string `json:"default,omitempty"`
InterruptExistConnections bool `json:"interrupt_exist_connections,omitempty"`
}

type URLTestOutboundOptions struct {
Outbounds []string `json:"outbounds"`
URL string `json:"url,omitempty"`
Interval Duration `json:"interval,omitempty"`
Tolerance uint16 `json:"tolerance,omitempty"`
InterruptExistConnections bool `json:"interrupt_exist_connections,omitempty"`
}

@ -114,7 +114,7 @@ func (h *Inbound) UnmarshalJSON(bytes []byte) error {
}
err = UnmarshallExcluded(bytes, (*_Inbound)(h), v)
if err != nil {
return E.Cause(err, "inbound options")
return err
}
return nil
}

@ -124,7 +124,7 @@ func (h *Outbound) UnmarshalJSON(bytes []byte) error {
}
err = UnmarshallExcluded(bytes, (*_Outbound)(h), v)
if err != nil {
return E.Cause(err, "outbound options")
return err
}
return nil
}

@ -4,6 +4,7 @@ type RouteOptions struct {
GeoIP *GeoIPOptions `json:"geoip,omitempty"`
Geosite *GeositeOptions `json:"geosite,omitempty"`
Rules []Rule `json:"rules,omitempty"`
RuleSet []RuleSet `json:"rule_set,omitempty"`
Final string `json:"final,omitempty"`
FindProcess bool `json:"find_process,omitempty"`
AutoDetectInterface bool `json:"auto_detect_interface,omitempty"`

@ -48,40 +48,55 @@ func (r *Rule) UnmarshalJSON(bytes []byte) error {
|
||||
}
|
||||
err = UnmarshallExcluded(bytes, (*_Rule)(r), v)
|
||||
if err != nil {
|
||||
return E.Cause(err, "route rule")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r Rule) IsValid() bool {
|
||||
switch r.Type {
|
||||
case C.RuleTypeDefault:
|
||||
return r.DefaultOptions.IsValid()
|
||||
case C.RuleTypeLogical:
|
||||
return r.LogicalOptions.IsValid()
|
||||
default:
|
||||
panic("unknown rule type: " + r.Type)
|
||||
}
|
||||
}
|
||||
|
||||
type DefaultRule struct {
|
||||
Inbound Listable[string] `json:"inbound,omitempty"`
|
||||
IPVersion int `json:"ip_version,omitempty"`
|
||||
Network Listable[string] `json:"network,omitempty"`
|
||||
AuthUser Listable[string] `json:"auth_user,omitempty"`
|
||||
Protocol Listable[string] `json:"protocol,omitempty"`
|
||||
Domain Listable[string] `json:"domain,omitempty"`
|
||||
DomainSuffix Listable[string] `json:"domain_suffix,omitempty"`
|
||||
DomainKeyword Listable[string] `json:"domain_keyword,omitempty"`
|
||||
DomainRegex Listable[string] `json:"domain_regex,omitempty"`
|
||||
Geosite Listable[string] `json:"geosite,omitempty"`
|
||||
SourceGeoIP Listable[string] `json:"source_geoip,omitempty"`
|
||||
GeoIP Listable[string] `json:"geoip,omitempty"`
|
||||
SourceIPCIDR Listable[string] `json:"source_ip_cidr,omitempty"`
|
||||
IPCIDR Listable[string] `json:"ip_cidr,omitempty"`
|
||||
SourcePort Listable[uint16] `json:"source_port,omitempty"`
|
||||
SourcePortRange Listable[string] `json:"source_port_range,omitempty"`
|
||||
Port Listable[uint16] `json:"port,omitempty"`
|
||||
PortRange Listable[string] `json:"port_range,omitempty"`
|
||||
ProcessName Listable[string] `json:"process_name,omitempty"`
|
||||
ProcessPath Listable[string] `json:"process_path,omitempty"`
|
||||
PackageName Listable[string] `json:"package_name,omitempty"`
|
||||
User Listable[string] `json:"user,omitempty"`
|
||||
UserID Listable[int32] `json:"user_id,omitempty"`
|
||||
ClashMode string `json:"clash_mode,omitempty"`
|
||||
WIFISSID Listable[string] `json:"wifi_ssid,omitempty"`
|
||||
WIFIBSSID Listable[string] `json:"wifi_bssid,omitempty"`
|
||||
Invert bool `json:"invert,omitempty"`
|
||||
Outbound string `json:"outbound,omitempty"`
|
||||
Inbound Listable[string] `json:"inbound,omitempty"`
|
||||
IPVersion int `json:"ip_version,omitempty"`
|
||||
Network Listable[string] `json:"network,omitempty"`
|
||||
AuthUser Listable[string] `json:"auth_user,omitempty"`
|
||||
Protocol Listable[string] `json:"protocol,omitempty"`
|
||||
Domain Listable[string] `json:"domain,omitempty"`
|
||||
DomainSuffix Listable[string] `json:"domain_suffix,omitempty"`
|
||||
DomainKeyword Listable[string] `json:"domain_keyword,omitempty"`
|
||||
DomainRegex Listable[string] `json:"domain_regex,omitempty"`
|
||||
Geosite Listable[string] `json:"geosite,omitempty"`
|
||||
SourceGeoIP Listable[string] `json:"source_geoip,omitempty"`
|
||||
GeoIP Listable[string] `json:"geoip,omitempty"`
|
||||
SourceIPCIDR Listable[string] `json:"source_ip_cidr,omitempty"`
|
||||
SourceIPIsPrivate bool `json:"source_ip_is_private,omitempty"`
|
||||
IPCIDR Listable[string] `json:"ip_cidr,omitempty"`
|
||||
IPIsPrivate bool `json:"ip_is_private,omitempty"`
|
||||
SourcePort Listable[uint16] `json:"source_port,omitempty"`
|
||||
SourcePortRange Listable[string] `json:"source_port_range,omitempty"`
|
||||
Port Listable[uint16] `json:"port,omitempty"`
|
||||
PortRange Listable[string] `json:"port_range,omitempty"`
|
||||
ProcessName Listable[string] `json:"process_name,omitempty"`
|
||||
ProcessPath Listable[string] `json:"process_path,omitempty"`
|
||||
PackageName Listable[string] `json:"package_name,omitempty"`
|
||||
User Listable[string] `json:"user,omitempty"`
|
||||
UserID Listable[int32] `json:"user_id,omitempty"`
|
||||
ClashMode string `json:"clash_mode,omitempty"`
|
||||
WIFISSID Listable[string] `json:"wifi_ssid,omitempty"`
|
||||
WIFIBSSID Listable[string] `json:"wifi_bssid,omitempty"`
|
||||
RuleSet Listable[string] `json:"rule_set,omitempty"`
|
||||
RuleSetIPCIDRMatchSource bool `json:"rule_set_ipcidr_match_source,omitempty"`
|
||||
Invert bool `json:"invert,omitempty"`
|
||||
Outbound string `json:"outbound,omitempty"`
|
||||
}
|
||||
|
||||
func (r DefaultRule) IsValid() bool {
|
||||
@ -92,12 +107,12 @@ func (r DefaultRule) IsValid() bool {
|
||||
}
|
||||
|
||||
type LogicalRule struct {
|
||||
Mode string `json:"mode"`
|
||||
Rules []DefaultRule `json:"rules,omitempty"`
|
||||
Invert bool `json:"invert,omitempty"`
|
||||
Outbound string `json:"outbound,omitempty"`
|
||||
Mode string `json:"mode"`
|
||||
Rules []Rule `json:"rules,omitempty"`
|
||||
Invert bool `json:"invert,omitempty"`
|
||||
Outbound string `json:"outbound,omitempty"`
|
||||
}
|
||||
|
||||
func (r LogicalRule) IsValid() bool {
|
||||
return len(r.Rules) > 0 && common.All(r.Rules, DefaultRule.IsValid)
|
||||
return len(r.Rules) > 0 && common.All(r.Rules, Rule.IsValid)
|
||||
}
|
||||
|
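The widened `DefaultRule` adds `rule_set`, `source_ip_is_private`, `ip_is_private` and `rule_set_ipcidr_match_source`, and `LogicalRule` can now nest full `Rule` values. A small hedged sketch of a rule using the new matchers; the tag and outbound names are invented.

```go
package option_test

import (
	"fmt"

	"github.com/sagernet/sing-box/common/json"
	"github.com/sagernet/sing-box/option"
)

// routeRuleConfig references a rule-set by tag and combines it with the new
// private-address matcher; "geosite-ads" and "block" are placeholder names.
const routeRuleConfig = `{
  "rule_set": ["geosite-ads"],
  "source_ip_is_private": true,
  "outbound": "block"
}`

func ExampleDefaultRule() {
	var rule option.DefaultRule
	if err := json.Unmarshal([]byte(routeRuleConfig), &rule); err != nil {
		panic(err)
	}
	fmt.Println(rule.RuleSet, rule.SourceIPIsPrivate, rule.Outbound)
	// Output: [geosite-ads] true block
}
```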
@ -48,42 +48,55 @@ func (r *DNSRule) UnmarshalJSON(bytes []byte) error {
|
||||
}
|
||||
err = UnmarshallExcluded(bytes, (*_DNSRule)(r), v)
|
||||
if err != nil {
|
||||
return E.Cause(err, "dns route rule")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r DNSRule) IsValid() bool {
|
||||
switch r.Type {
|
||||
case C.RuleTypeDefault:
|
||||
return r.DefaultOptions.IsValid()
|
||||
case C.RuleTypeLogical:
|
||||
return r.LogicalOptions.IsValid()
|
||||
default:
|
||||
panic("unknown DNS rule type: " + r.Type)
|
||||
}
|
||||
}
|
||||
|
||||
type DefaultDNSRule struct {
|
||||
Inbound Listable[string] `json:"inbound,omitempty"`
|
||||
IPVersion int `json:"ip_version,omitempty"`
|
||||
QueryType Listable[DNSQueryType] `json:"query_type,omitempty"`
|
||||
Network Listable[string] `json:"network,omitempty"`
|
||||
AuthUser Listable[string] `json:"auth_user,omitempty"`
|
||||
Protocol Listable[string] `json:"protocol,omitempty"`
|
||||
Domain Listable[string] `json:"domain,omitempty"`
|
||||
DomainSuffix Listable[string] `json:"domain_suffix,omitempty"`
|
||||
DomainKeyword Listable[string] `json:"domain_keyword,omitempty"`
|
||||
DomainRegex Listable[string] `json:"domain_regex,omitempty"`
|
||||
Geosite Listable[string] `json:"geosite,omitempty"`
|
||||
SourceGeoIP Listable[string] `json:"source_geoip,omitempty"`
|
||||
SourceIPCIDR Listable[string] `json:"source_ip_cidr,omitempty"`
|
||||
SourcePort Listable[uint16] `json:"source_port,omitempty"`
|
||||
SourcePortRange Listable[string] `json:"source_port_range,omitempty"`
|
||||
Port Listable[uint16] `json:"port,omitempty"`
|
||||
PortRange Listable[string] `json:"port_range,omitempty"`
|
||||
ProcessName Listable[string] `json:"process_name,omitempty"`
|
||||
ProcessPath Listable[string] `json:"process_path,omitempty"`
|
||||
PackageName Listable[string] `json:"package_name,omitempty"`
|
||||
User Listable[string] `json:"user,omitempty"`
|
||||
UserID Listable[int32] `json:"user_id,omitempty"`
|
||||
Outbound Listable[string] `json:"outbound,omitempty"`
|
||||
ClashMode string `json:"clash_mode,omitempty"`
|
||||
WIFISSID Listable[string] `json:"wifi_ssid,omitempty"`
|
||||
WIFIBSSID Listable[string] `json:"wifi_bssid,omitempty"`
|
||||
Invert bool `json:"invert,omitempty"`
|
||||
Server string `json:"server,omitempty"`
|
||||
DisableCache bool `json:"disable_cache,omitempty"`
|
||||
RewriteTTL *uint32 `json:"rewrite_ttl,omitempty"`
|
||||
Inbound Listable[string] `json:"inbound,omitempty"`
|
||||
IPVersion int `json:"ip_version,omitempty"`
|
||||
QueryType Listable[DNSQueryType] `json:"query_type,omitempty"`
|
||||
Network Listable[string] `json:"network,omitempty"`
|
||||
AuthUser Listable[string] `json:"auth_user,omitempty"`
|
||||
Protocol Listable[string] `json:"protocol,omitempty"`
|
||||
Domain Listable[string] `json:"domain,omitempty"`
|
||||
DomainSuffix Listable[string] `json:"domain_suffix,omitempty"`
|
||||
DomainKeyword Listable[string] `json:"domain_keyword,omitempty"`
|
||||
DomainRegex Listable[string] `json:"domain_regex,omitempty"`
|
||||
Geosite Listable[string] `json:"geosite,omitempty"`
|
||||
SourceGeoIP Listable[string] `json:"source_geoip,omitempty"`
|
||||
SourceIPCIDR Listable[string] `json:"source_ip_cidr,omitempty"`
|
||||
SourceIPIsPrivate bool `json:"source_ip_is_private,omitempty"`
|
||||
SourcePort Listable[uint16] `json:"source_port,omitempty"`
|
||||
SourcePortRange Listable[string] `json:"source_port_range,omitempty"`
|
||||
Port Listable[uint16] `json:"port,omitempty"`
|
||||
PortRange Listable[string] `json:"port_range,omitempty"`
|
||||
ProcessName Listable[string] `json:"process_name,omitempty"`
|
||||
ProcessPath Listable[string] `json:"process_path,omitempty"`
|
||||
PackageName Listable[string] `json:"package_name,omitempty"`
|
||||
User Listable[string] `json:"user,omitempty"`
|
||||
UserID Listable[int32] `json:"user_id,omitempty"`
|
||||
Outbound Listable[string] `json:"outbound,omitempty"`
|
||||
ClashMode string `json:"clash_mode,omitempty"`
|
||||
WIFISSID Listable[string] `json:"wifi_ssid,omitempty"`
|
||||
WIFIBSSID Listable[string] `json:"wifi_bssid,omitempty"`
|
||||
RuleSet Listable[string] `json:"rule_set,omitempty"`
|
||||
Invert bool `json:"invert,omitempty"`
|
||||
Server string `json:"server,omitempty"`
|
||||
DisableCache bool `json:"disable_cache,omitempty"`
|
||||
RewriteTTL *uint32 `json:"rewrite_ttl,omitempty"`
|
||||
}
|
||||
|
||||
func (r DefaultDNSRule) IsValid() bool {
|
||||
@ -96,14 +109,14 @@ func (r DefaultDNSRule) IsValid() bool {
|
||||
}
|
||||
|
||||
type LogicalDNSRule struct {
|
||||
Mode string `json:"mode"`
|
||||
Rules []DefaultDNSRule `json:"rules,omitempty"`
|
||||
Invert bool `json:"invert,omitempty"`
|
||||
Server string `json:"server,omitempty"`
|
||||
DisableCache bool `json:"disable_cache,omitempty"`
|
||||
RewriteTTL *uint32 `json:"rewrite_ttl,omitempty"`
|
||||
Mode string `json:"mode"`
|
||||
Rules []DNSRule `json:"rules,omitempty"`
|
||||
Invert bool `json:"invert,omitempty"`
|
||||
Server string `json:"server,omitempty"`
|
||||
DisableCache bool `json:"disable_cache,omitempty"`
|
||||
RewriteTTL *uint32 `json:"rewrite_ttl,omitempty"`
|
||||
}
|
||||
|
||||
func (r LogicalDNSRule) IsValid() bool {
|
||||
return len(r.Rules) > 0 && common.All(r.Rules, DefaultDNSRule.IsValid)
|
||||
return len(r.Rules) > 0 && common.All(r.Rules, DNSRule.IsValid)
|
||||
}
|
||||
|
230
option/rule_set.go
Normal file
230
option/rule_set.go
Normal file
@ -0,0 +1,230 @@
|
||||
package option
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/sagernet/sing-box/common/json"
|
||||
C "github.com/sagernet/sing-box/constant"
|
||||
"github.com/sagernet/sing/common"
|
||||
"github.com/sagernet/sing/common/domain"
|
||||
E "github.com/sagernet/sing/common/exceptions"
|
||||
F "github.com/sagernet/sing/common/format"
|
||||
|
||||
"go4.org/netipx"
|
||||
)
|
||||
|
||||
type _RuleSet struct {
|
||||
Type string `json:"type"`
|
||||
Tag string `json:"tag"`
|
||||
Format string `json:"format"`
|
||||
LocalOptions LocalRuleSet `json:"-"`
|
||||
RemoteOptions RemoteRuleSet `json:"-"`
|
||||
}
|
||||
|
||||
type RuleSet _RuleSet
|
||||
|
||||
func (r RuleSet) MarshalJSON() ([]byte, error) {
|
||||
var v any
|
||||
switch r.Type {
|
||||
case C.RuleSetTypeLocal:
|
||||
v = r.LocalOptions
|
||||
case C.RuleSetTypeRemote:
|
||||
v = r.RemoteOptions
|
||||
default:
|
||||
return nil, E.New("unknown rule set type: " + r.Type)
|
||||
}
|
||||
return MarshallObjects((_RuleSet)(r), v)
|
||||
}
|
||||
|
||||
func (r *RuleSet) UnmarshalJSON(bytes []byte) error {
|
||||
err := json.Unmarshal(bytes, (*_RuleSet)(r))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if r.Tag == "" {
|
||||
return E.New("missing rule_set.[].tag")
|
||||
}
|
||||
switch r.Format {
|
||||
case "":
|
||||
return E.New("missing rule_set.[].format")
|
||||
case C.RuleSetFormatSource, C.RuleSetFormatBinary:
|
||||
default:
|
||||
return E.New("unknown rule set format: " + r.Format)
|
||||
}
|
||||
var v any
|
||||
switch r.Type {
|
||||
case C.RuleSetTypeLocal:
|
||||
v = &r.LocalOptions
|
||||
case C.RuleSetTypeRemote:
|
||||
v = &r.RemoteOptions
|
||||
case "":
|
||||
return E.New("missing rule_set.[].type")
|
||||
default:
|
||||
return E.New("unknown rule set type: " + r.Type)
|
||||
}
|
||||
err = UnmarshallExcluded(bytes, (*_RuleSet)(r), v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type LocalRuleSet struct {
|
||||
Path string `json:"path,omitempty"`
|
||||
}
|
||||
|
||||
type RemoteRuleSet struct {
|
||||
URL string `json:"url"`
|
||||
DownloadDetour string `json:"download_detour,omitempty"`
|
||||
UpdateInterval Duration `json:"update_interval,omitempty"`
|
||||
}
|
||||
|
||||
type _HeadlessRule struct {
|
||||
Type string `json:"type,omitempty"`
|
||||
DefaultOptions DefaultHeadlessRule `json:"-"`
|
||||
LogicalOptions LogicalHeadlessRule `json:"-"`
|
||||
}
|
||||
|
||||
type HeadlessRule _HeadlessRule
|
||||
|
||||
func (r HeadlessRule) MarshalJSON() ([]byte, error) {
|
||||
var v any
|
||||
switch r.Type {
|
||||
case C.RuleTypeDefault:
|
||||
r.Type = ""
|
||||
v = r.DefaultOptions
|
||||
case C.RuleTypeLogical:
|
||||
v = r.LogicalOptions
|
||||
default:
|
||||
return nil, E.New("unknown rule type: " + r.Type)
|
||||
}
|
||||
return MarshallObjects((_HeadlessRule)(r), v)
|
||||
}
|
||||
|
||||
func (r *HeadlessRule) UnmarshalJSON(bytes []byte) error {
|
||||
err := json.Unmarshal(bytes, (*_HeadlessRule)(r))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var v any
|
||||
switch r.Type {
|
||||
case "", C.RuleTypeDefault:
|
||||
r.Type = C.RuleTypeDefault
|
||||
v = &r.DefaultOptions
|
||||
case C.RuleTypeLogical:
|
||||
v = &r.LogicalOptions
|
||||
default:
|
||||
return E.New("unknown rule type: " + r.Type)
|
||||
}
|
||||
err = UnmarshallExcluded(bytes, (*_HeadlessRule)(r), v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r HeadlessRule) IsValid() bool {
|
||||
switch r.Type {
|
||||
case C.RuleTypeDefault, "":
|
||||
return r.DefaultOptions.IsValid()
|
||||
case C.RuleTypeLogical:
|
||||
return r.LogicalOptions.IsValid()
|
||||
default:
|
||||
panic("unknown rule type: " + r.Type)
|
||||
}
|
||||
}
|
||||
|
||||
type DefaultHeadlessRule struct {
|
||||
QueryType Listable[DNSQueryType] `json:"query_type,omitempty"`
|
||||
Network Listable[string] `json:"network,omitempty"`
|
||||
Domain Listable[string] `json:"domain,omitempty"`
|
||||
DomainSuffix Listable[string] `json:"domain_suffix,omitempty"`
|
||||
DomainKeyword Listable[string] `json:"domain_keyword,omitempty"`
|
||||
DomainRegex Listable[string] `json:"domain_regex,omitempty"`
|
||||
SourceIPCIDR Listable[string] `json:"source_ip_cidr,omitempty"`
|
||||
IPCIDR Listable[string] `json:"ip_cidr,omitempty"`
|
||||
SourcePort Listable[uint16] `json:"source_port,omitempty"`
|
||||
SourcePortRange Listable[string] `json:"source_port_range,omitempty"`
|
||||
Port Listable[uint16] `json:"port,omitempty"`
|
||||
PortRange Listable[string] `json:"port_range,omitempty"`
|
||||
ProcessName Listable[string] `json:"process_name,omitempty"`
|
||||
ProcessPath Listable[string] `json:"process_path,omitempty"`
|
||||
PackageName Listable[string] `json:"package_name,omitempty"`
|
||||
WIFISSID Listable[string] `json:"wifi_ssid,omitempty"`
|
||||
WIFIBSSID Listable[string] `json:"wifi_bssid,omitempty"`
|
||||
Invert bool `json:"invert,omitempty"`
|
||||
|
||||
DomainMatcher *domain.Matcher `json:"-"`
|
||||
SourceIPSet *netipx.IPSet `json:"-"`
|
||||
IPSet *netipx.IPSet `json:"-"`
|
||||
}
|
||||
|
||||
func (r DefaultHeadlessRule) IsValid() bool {
|
||||
var defaultValue DefaultHeadlessRule
|
||||
defaultValue.Invert = r.Invert
|
||||
return !reflect.DeepEqual(r, defaultValue)
|
||||
}
|
||||
|
||||
type LogicalHeadlessRule struct {
|
||||
Mode string `json:"mode"`
|
||||
Rules []HeadlessRule `json:"rules,omitempty"`
|
||||
Invert bool `json:"invert,omitempty"`
|
||||
}
|
||||
|
||||
func (r LogicalHeadlessRule) IsValid() bool {
|
||||
return len(r.Rules) > 0 && common.All(r.Rules, HeadlessRule.IsValid)
|
||||
}
|
||||
|
||||
type _PlainRuleSetCompat struct {
|
||||
Version int `json:"version"`
|
||||
Options PlainRuleSet `json:"-"`
|
||||
}
|
||||
|
||||
type PlainRuleSetCompat _PlainRuleSetCompat
|
||||
|
||||
func (r PlainRuleSetCompat) MarshalJSON() ([]byte, error) {
|
||||
var v any
|
||||
switch r.Version {
|
||||
case C.RuleSetVersion1:
|
||||
v = r.Options
|
||||
default:
|
||||
return nil, E.New("unknown rule set version: ", r.Version)
|
||||
}
|
||||
return MarshallObjects((_PlainRuleSetCompat)(r), v)
|
||||
}
|
||||
|
||||
func (r *PlainRuleSetCompat) UnmarshalJSON(bytes []byte) error {
|
||||
err := json.Unmarshal(bytes, (*_PlainRuleSetCompat)(r))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var v any
|
||||
switch r.Version {
|
||||
case C.RuleSetVersion1:
|
||||
v = &r.Options
|
||||
case 0:
|
||||
return E.New("missing rule set version")
|
||||
default:
|
||||
return E.New("unknown rule set version: ", r.Version)
|
||||
}
|
||||
err = UnmarshallExcluded(bytes, (*_PlainRuleSetCompat)(r), v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r PlainRuleSetCompat) Upgrade() PlainRuleSet {
|
||||
var result PlainRuleSet
|
||||
switch r.Version {
|
||||
case C.RuleSetVersion1:
|
||||
result = r.Options
|
||||
default:
|
||||
panic("unknown rule set version: " + F.ToString(r.Version))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
type PlainRuleSet struct {
|
||||
Rules []HeadlessRule `json:"rules,omitempty"`
|
||||
}
|
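`PlainRuleSetCompat` is what the source-format loader decodes `.json` rule-sets into before upgrading them to `PlainRuleSet`. A hedged sketch of a minimal version-1 source file and that decode/upgrade step; the domain suffix is only an example.

```go
package option_test

import (
	"fmt"

	"github.com/sagernet/sing-box/common/json"
	"github.com/sagernet/sing-box/option"
)

// ruleSetSource is a minimal source-format rule-set: one headless rule with a
// single domain_suffix matcher.
const ruleSetSource = `{
  "version": 1,
  "rules": [
    {
      "domain_suffix": [".example.org"]
    }
  ]
}`

func ExamplePlainRuleSetCompat() {
	var compat option.PlainRuleSetCompat
	if err := json.Unmarshal([]byte(ruleSetSource), &compat); err != nil {
		panic(err)
	}
	plain := compat.Upgrade()
	fmt.Println(compat.Version, len(plain.Rules))
	// Output: 1 1
}
```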
226
option/time_unit.go
Normal file
226
option/time_unit.go
Normal file
@ -0,0 +1,226 @@
|
||||
package option
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
const durationDay = 24 * time.Hour
|
||||
|
||||
var unitMap = map[string]uint64{
|
||||
"ns": uint64(time.Nanosecond),
|
||||
"us": uint64(time.Microsecond),
|
||||
"µs": uint64(time.Microsecond), // U+00B5 = micro symbol
|
||||
"μs": uint64(time.Microsecond), // U+03BC = Greek letter mu
|
||||
"ms": uint64(time.Millisecond),
|
||||
"s": uint64(time.Second),
|
||||
"m": uint64(time.Minute),
|
||||
"h": uint64(time.Hour),
|
||||
"d": uint64(durationDay),
|
||||
}
|
||||
|
||||
// ParseDuration parses a duration string.
|
||||
// A duration string is a possibly signed sequence of
|
||||
// decimal numbers, each with optional fraction and a unit suffix,
|
||||
// such as "300ms", "-1.5h" or "2h45m".
|
||||
// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
func ParseDuration(s string) (Duration, error) {
|
||||
// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
|
||||
orig := s
|
||||
var d uint64
|
||||
neg := false
|
||||
|
||||
// Consume [-+]?
|
||||
if s != "" {
|
||||
c := s[0]
|
||||
if c == '-' || c == '+' {
|
||||
neg = c == '-'
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
// Special case: if all that is left is "0", this is zero.
|
||||
if s == "0" {
|
||||
return 0, nil
|
||||
}
|
||||
if s == "" {
|
||||
return 0, errors.New("time: invalid duration " + quote(orig))
|
||||
}
|
||||
for s != "" {
|
||||
var (
|
||||
v, f uint64 // integers before, after decimal point
|
||||
scale float64 = 1 // value = v + f/scale
|
||||
)
|
||||
|
||||
var err error
|
||||
|
||||
// The next character must be [0-9.]
|
||||
if !(s[0] == '.' || '0' <= s[0] && s[0] <= '9') {
|
||||
return 0, errors.New("time: invalid duration " + quote(orig))
|
||||
}
|
||||
// Consume [0-9]*
|
||||
pl := len(s)
|
||||
v, s, err = leadingInt(s)
|
||||
if err != nil {
|
||||
return 0, errors.New("time: invalid duration " + quote(orig))
|
||||
}
|
||||
pre := pl != len(s) // whether we consumed anything before a period
|
||||
|
||||
// Consume (\.[0-9]*)?
|
||||
post := false
|
||||
if s != "" && s[0] == '.' {
|
||||
s = s[1:]
|
||||
pl := len(s)
|
||||
f, scale, s = leadingFraction(s)
|
||||
post = pl != len(s)
|
||||
}
|
||||
if !pre && !post {
|
||||
// no digits (e.g. ".s" or "-.s")
|
||||
return 0, errors.New("time: invalid duration " + quote(orig))
|
||||
}
|
||||
|
||||
// Consume unit.
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c == '.' || '0' <= c && c <= '9' {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i == 0 {
|
||||
return 0, errors.New("time: missing unit in duration " + quote(orig))
|
||||
}
|
||||
u := s[:i]
|
||||
s = s[i:]
|
||||
unit, ok := unitMap[u]
|
||||
if !ok {
|
||||
return 0, errors.New("time: unknown unit " + quote(u) + " in duration " + quote(orig))
|
||||
}
|
||||
if v > 1<<63/unit {
|
||||
// overflow
|
||||
return 0, errors.New("time: invalid duration " + quote(orig))
|
||||
}
|
||||
v *= unit
|
||||
if f > 0 {
|
||||
// float64 is needed to be nanosecond accurate for fractions of hours.
|
||||
// v >= 0 && (f*unit/scale) <= 3.6e+12 (ns/h, h is the largest unit)
|
||||
v += uint64(float64(f) * (float64(unit) / scale))
|
||||
if v > 1<<63 {
|
||||
// overflow
|
||||
return 0, errors.New("time: invalid duration " + quote(orig))
|
||||
}
|
||||
}
|
||||
d += v
|
||||
if d > 1<<63 {
|
||||
return 0, errors.New("time: invalid duration " + quote(orig))
|
||||
}
|
||||
}
|
||||
if neg {
|
||||
return -Duration(d), nil
|
||||
}
|
||||
if d > 1<<63-1 {
|
||||
return 0, errors.New("time: invalid duration " + quote(orig))
|
||||
}
|
||||
return Duration(d), nil
|
||||
}
|
||||
|
||||
var errLeadingInt = errors.New("time: bad [0-9]*") // never printed
|
||||
|
||||
// leadingInt consumes the leading [0-9]* from s.
|
||||
func leadingInt[bytes []byte | string](s bytes) (x uint64, rem bytes, err error) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c < '0' || c > '9' {
|
||||
break
|
||||
}
|
||||
if x > 1<<63/10 {
|
||||
// overflow
|
||||
return 0, rem, errLeadingInt
|
||||
}
|
||||
x = x*10 + uint64(c) - '0'
|
||||
if x > 1<<63 {
|
||||
// overflow
|
||||
return 0, rem, errLeadingInt
|
||||
}
|
||||
}
|
||||
return x, s[i:], nil
|
||||
}
|
||||
|
||||
// leadingFraction consumes the leading [0-9]* from s.
|
||||
// It is used only for fractions, so does not return an error on overflow,
|
||||
// it just stops accumulating precision.
|
||||
func leadingFraction(s string) (x uint64, scale float64, rem string) {
|
||||
i := 0
|
||||
scale = 1
|
||||
overflow := false
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c < '0' || c > '9' {
|
||||
break
|
||||
}
|
||||
if overflow {
|
||||
continue
|
||||
}
|
||||
if x > (1<<63-1)/10 {
|
||||
// It's possible for overflow to give a positive number, so take care.
|
||||
overflow = true
|
||||
continue
|
||||
}
|
||||
y := x*10 + uint64(c) - '0'
|
||||
if y > 1<<63 {
|
||||
overflow = true
|
||||
continue
|
||||
}
|
||||
x = y
|
||||
scale *= 10
|
||||
}
|
||||
return x, scale, s[i:]
|
||||
}
|
||||
|
||||
// These are borrowed from unicode/utf8 and strconv and replicate behavior in
|
||||
// that package, since we can't take a dependency on either.
|
||||
const (
|
||||
lowerhex = "0123456789abcdef"
|
||||
runeSelf = 0x80
|
||||
runeError = '\uFFFD'
|
||||
)
|
||||
|
||||
func quote(s string) string {
|
||||
buf := make([]byte, 1, len(s)+2) // slice will be at least len(s) + quotes
|
||||
buf[0] = '"'
|
||||
for i, c := range s {
|
||||
if c >= runeSelf || c < ' ' {
|
||||
// This means you are asking us to parse a time.Duration or
|
||||
// time.Location with unprintable or non-ASCII characters in it.
|
||||
// We don't expect to hit this case very often. We could try to
|
||||
// reproduce strconv.Quote's behavior with full fidelity but
|
||||
// given how rarely we expect to hit these edge cases, speed and
|
||||
// conciseness are better.
|
||||
var width int
|
||||
if c == runeError {
|
||||
width = 1
|
||||
if i+2 < len(s) && s[i:i+3] == string(runeError) {
|
||||
width = 3
|
||||
}
|
||||
} else {
|
||||
width = len(string(c))
|
||||
}
|
||||
for j := 0; j < width; j++ {
|
||||
buf = append(buf, `\x`...)
|
||||
buf = append(buf, lowerhex[s[i+j]>>4])
|
||||
buf = append(buf, lowerhex[s[i+j]&0xF])
|
||||
}
|
||||
} else {
|
||||
if c == '"' || c == '\\' {
|
||||
buf = append(buf, '\\')
|
||||
}
|
||||
buf = append(buf, string(c)...)
|
||||
}
|
||||
}
|
||||
buf = append(buf, '"')
|
||||
return string(buf)
|
||||
}
|
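This vendored parser extends the grammar of Go's `time.ParseDuration` with a `d` (day) unit so that options such as a rule-set's `update_interval` can be written compactly. A minimal usage sketch, assuming `Duration` stays convertible to `time.Duration` as in `option/types.go`:

```go
package option_test

import (
	"fmt"
	"time"

	"github.com/sagernet/sing-box/option"
)

func ExampleParseDuration() {
	// "1d12h" is rejected by time.ParseDuration but accepted here.
	interval, err := option.ParseDuration("1d12h")
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Duration(interval) == 36*time.Hour)
	// Output: true
}
```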
@ -62,7 +62,7 @@ func (o *ACMEDNS01ChallengeOptions) UnmarshalJSON(bytes []byte) error {
|
||||
}
|
||||
err = UnmarshallExcluded(bytes, (*_ACMEDNS01ChallengeOptions)(o), v)
|
||||
if err != nil {
|
||||
return E.Cause(err, "DNS01 challenge options")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -164,7 +164,7 @@ func (d *Duration) UnmarshalJSON(bytes []byte) error {
if err != nil {
return err
}
duration, err := time.ParseDuration(value)
duration, err := ParseDuration(value)
if err != nil {
return err
}
@ -174,6 +174,14 @@ func (d *Duration) UnmarshalJSON(bytes []byte) error {

type DNSQueryType uint16

func (t DNSQueryType) String() string {
typeName, loaded := mDNS.TypeToString[uint16(t)]
if loaded {
return typeName
}
return F.ToString(uint16(t))
}

func (t DNSQueryType) MarshalJSON() ([]byte, error) {
typeName, loaded := mDNS.TypeToString[uint16(t)]
if loaded {

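`DNSQueryType.String` falls back to the numeric value only when the type is not in `mDNS.TypeToString`, so familiar record types print by name. A tiny hedged sketch:

```go
package option_test

import (
	"fmt"

	"github.com/sagernet/sing-box/option"
)

func ExampleDNSQueryType_String() {
	// 28 and 65 are the standard AAAA and HTTPS record type codes.
	fmt.Println(option.DNSQueryType(28), option.DNSQueryType(65))
	// Output: AAAA HTTPS
}
```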
@ -1,13 +1 @@
|
||||
package option
|
||||
|
||||
type V2RayAPIOptions struct {
|
||||
Listen string `json:"listen,omitempty"`
|
||||
Stats *V2RayStatsServiceOptions `json:"stats,omitempty"`
|
||||
}
|
||||
|
||||
type V2RayStatsServiceOptions struct {
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
Inbounds []string `json:"inbounds,omitempty"`
|
||||
Outbounds []string `json:"outbounds,omitempty"`
|
||||
Users []string `json:"users,omitempty"`
|
||||
}
|
||||
|
@ -60,7 +60,7 @@ func (o *V2RayTransportOptions) UnmarshalJSON(bytes []byte) error {
|
||||
}
|
||||
err = UnmarshallExcluded(bytes, (*_V2RayTransportOptions)(o), v)
|
||||
if err != nil {
|
||||
return E.Cause(err, "vmess transport options")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -56,7 +56,7 @@ func New(ctx context.Context, router adapter.Router, logger log.ContextLogger, t
|
||||
case C.TypeHysteria2:
|
||||
return NewHysteria2(ctx, router, logger, tag, options.Hysteria2Options)
|
||||
case C.TypeSelector:
|
||||
return NewSelector(router, logger, tag, options.SelectorOptions)
|
||||
return NewSelector(ctx, router, logger, tag, options.SelectorOptions)
|
||||
case C.TypeURLTest:
|
||||
return NewURLTest(ctx, router, logger, tag, options.URLTestOptions)
|
||||
default:
|
||||
|
@ -194,9 +194,7 @@ func (d *DNS) newPacketConnection(ctx context.Context, conn N.PacketConn, readWa
|
||||
group.Append0(func(ctx context.Context) error {
|
||||
var buffer *buf.Buffer
|
||||
readWaiter.InitializeReadWaiter(func() *buf.Buffer {
|
||||
buffer = buf.NewSize(dns.FixedPacketSize)
|
||||
buffer.FullReset()
|
||||
return buffer
|
||||
return buf.NewSize(dns.FixedPacketSize)
|
||||
})
|
||||
defer readWaiter.InitializeReadWaiter(nil)
|
||||
for {
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
E "github.com/sagernet/sing/common/exceptions"
|
||||
M "github.com/sagernet/sing/common/metadata"
|
||||
N "github.com/sagernet/sing/common/network"
|
||||
"github.com/sagernet/sing/service"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -21,6 +22,7 @@ var (
|
||||
|
||||
type Selector struct {
|
||||
myOutboundAdapter
|
||||
ctx context.Context
|
||||
tags []string
|
||||
defaultTag string
|
||||
outbounds map[string]adapter.Outbound
|
||||
@ -29,7 +31,7 @@ type Selector struct {
|
||||
interruptExternalConnections bool
|
||||
}
|
||||
|
||||
func NewSelector(router adapter.Router, logger log.ContextLogger, tag string, options option.SelectorOutboundOptions) (*Selector, error) {
|
||||
func NewSelector(ctx context.Context, router adapter.Router, logger log.ContextLogger, tag string, options option.SelectorOutboundOptions) (*Selector, error) {
|
||||
outbound := &Selector{
|
||||
myOutboundAdapter: myOutboundAdapter{
|
||||
protocol: C.TypeSelector,
|
||||
@ -38,6 +40,7 @@ func NewSelector(router adapter.Router, logger log.ContextLogger, tag string, op
|
||||
tag: tag,
|
||||
dependencies: options.Outbounds,
|
||||
},
|
||||
ctx: ctx,
|
||||
tags: options.Outbounds,
|
||||
defaultTag: options.Default,
|
||||
outbounds: make(map[string]adapter.Outbound),
|
||||
@ -67,8 +70,9 @@ func (s *Selector) Start() error {
|
||||
}
|
||||
|
||||
if s.tag != "" {
|
||||
if clashServer := s.router.ClashServer(); clashServer != nil && clashServer.StoreSelected() {
|
||||
selected := clashServer.CacheFile().LoadSelected(s.tag)
|
||||
cacheFile := service.FromContext[adapter.CacheFile](s.ctx)
|
||||
if cacheFile != nil {
|
||||
selected := cacheFile.LoadSelected(s.tag)
|
||||
if selected != "" {
|
||||
detour, loaded := s.outbounds[selected]
|
||||
if loaded {
|
||||
@ -110,8 +114,9 @@ func (s *Selector) SelectOutbound(tag string) bool {
|
||||
}
|
||||
s.selected = detour
|
||||
if s.tag != "" {
|
||||
if clashServer := s.router.ClashServer(); clashServer != nil && clashServer.StoreSelected() {
|
||||
err := clashServer.CacheFile().StoreSelected(s.tag, tag)
|
||||
cacheFile := service.FromContext[adapter.CacheFile](s.ctx)
|
||||
if cacheFile != nil {
|
||||
err := cacheFile.StoreSelected(s.tag, tag)
|
||||
if err != nil {
|
||||
s.logger.Error("store selected: ", err)
|
||||
}
|
||||
|
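For the selector's `LoadSelected`/`StoreSelected` calls above to find anything, an `adapter.CacheFile` implementation has to be registered on the context before outbounds are created. A hedged sketch of that wiring; `service.ContextWith` is assumed to be the registration counterpart of the `service.FromContext` lookups used in this diff.

```go
package box_example

import (
	"context"

	"github.com/sagernet/sing-box/adapter"
	"github.com/sagernet/sing/service"
)

// attachCacheFile publishes a CacheFile on the context so that selectors, the
// Clash API server and the fakeip store can resolve it later with
// service.FromContext[adapter.CacheFile](ctx).
func attachCacheFile(ctx context.Context, cacheFile adapter.CacheFile) context.Context {
	return service.ContextWith[adapter.CacheFile](ctx, cacheFile)
}
```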
@ -39,6 +39,7 @@ import (
|
||||
M "github.com/sagernet/sing/common/metadata"
|
||||
N "github.com/sagernet/sing/common/network"
|
||||
serviceNTP "github.com/sagernet/sing/common/ntp"
|
||||
"github.com/sagernet/sing/common/task"
|
||||
"github.com/sagernet/sing/common/uot"
|
||||
"github.com/sagernet/sing/service"
|
||||
"github.com/sagernet/sing/service/pause"
|
||||
@ -67,6 +68,8 @@ type Router struct {
|
||||
dnsClient *dns.Client
|
||||
defaultDomainStrategy dns.DomainStrategy
|
||||
dnsRules []adapter.DNSRule
|
||||
ruleSets []adapter.RuleSet
|
||||
ruleSetMap map[string]adapter.RuleSet
|
||||
defaultTransport dns.Transport
|
||||
transports []dns.Transport
|
||||
transportMap map[string]dns.Transport
|
||||
@ -106,6 +109,7 @@ func NewRouter(
|
||||
outboundByTag: make(map[string]adapter.Outbound),
|
||||
rules: make([]adapter.Rule, 0, len(options.Rules)),
|
||||
dnsRules: make([]adapter.DNSRule, 0, len(dnsOptions.Rules)),
|
||||
ruleSetMap: make(map[string]adapter.RuleSet),
|
||||
needGeoIPDatabase: hasRule(options.Rules, isGeoIPRule) || hasDNSRule(dnsOptions.Rules, isGeoIPDNSRule),
|
||||
needGeositeDatabase: hasRule(options.Rules, isGeositeRule) || hasDNSRule(dnsOptions.Rules, isGeositeDNSRule),
|
||||
geoIPOptions: common.PtrValueOrDefault(options.GeoIP),
|
||||
@ -127,19 +131,27 @@ func NewRouter(
|
||||
Logger: router.dnsLogger,
|
||||
})
|
||||
for i, ruleOptions := range options.Rules {
|
||||
routeRule, err := NewRule(router, router.logger, ruleOptions)
|
||||
routeRule, err := NewRule(router, router.logger, ruleOptions, true)
|
||||
if err != nil {
|
||||
return nil, E.Cause(err, "parse rule[", i, "]")
|
||||
}
|
||||
router.rules = append(router.rules, routeRule)
|
||||
}
|
||||
for i, dnsRuleOptions := range dnsOptions.Rules {
|
||||
dnsRule, err := NewDNSRule(router, router.logger, dnsRuleOptions)
|
||||
dnsRule, err := NewDNSRule(router, router.logger, dnsRuleOptions, true)
|
||||
if err != nil {
|
||||
return nil, E.Cause(err, "parse dns rule[", i, "]")
|
||||
}
|
||||
router.dnsRules = append(router.dnsRules, dnsRule)
|
||||
}
|
||||
for i, ruleSetOptions := range options.RuleSet {
|
||||
ruleSet, err := NewRuleSet(ctx, router, router.logger, ruleSetOptions)
|
||||
if err != nil {
|
||||
return nil, E.Cause(err, "parse rule-set[", i, "]")
|
||||
}
|
||||
router.ruleSets = append(router.ruleSets, ruleSet)
|
||||
router.ruleSetMap[ruleSetOptions.Tag] = ruleSet
|
||||
}
|
||||
|
||||
transports := make([]dns.Transport, len(dnsOptions.Servers))
|
||||
dummyTransportMap := make(map[string]dns.Transport)
|
||||
@ -243,6 +255,9 @@ func NewRouter(
|
||||
}
|
||||
defaultTransport = transports[0]
|
||||
}
|
||||
if _, isFakeIP := defaultTransport.(adapter.FakeIPTransport); isFakeIP {
|
||||
return nil, E.New("default DNS server cannot be fakeip")
|
||||
}
|
||||
router.defaultTransport = defaultTransport
|
||||
router.transports = transports
|
||||
router.transportMap = transportMap
|
||||
@ -261,7 +276,7 @@ func NewRouter(
|
||||
if fakeIPOptions.Inet6Range != nil {
|
||||
inet6Range = *fakeIPOptions.Inet6Range
|
||||
}
|
||||
router.fakeIPStore = fakeip.NewStore(router, router.logger, inet4Range, inet6Range)
|
||||
router.fakeIPStore = fakeip.NewStore(ctx, router.logger, inet4Range, inet6Range)
|
||||
}
|
||||
|
||||
usePlatformDefaultInterfaceMonitor := platformInterface != nil && platformInterface.UsePlatformDefaultInterfaceMonitor()
|
||||
@ -479,6 +494,33 @@ func (r *Router) Start() error {
|
||||
if r.needWIFIState {
|
||||
r.updateWIFIState()
|
||||
}
|
||||
if r.fakeIPStore != nil {
|
||||
err := r.fakeIPStore.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(r.ruleSets) > 0 {
|
||||
ruleSetStartContext := NewRuleSetStartContext()
|
||||
var ruleSetStartGroup task.Group
|
||||
for i, ruleSet := range r.ruleSets {
|
||||
ruleSetInPlace := ruleSet
|
||||
ruleSetStartGroup.Append0(func(ctx context.Context) error {
|
||||
err := ruleSetInPlace.StartContext(ctx, ruleSetStartContext)
|
||||
if err != nil {
|
||||
return E.Cause(err, "initialize rule-set[", i, "]")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
ruleSetStartGroup.Concurrency(5)
|
||||
ruleSetStartGroup.FastFail()
|
||||
err := ruleSetStartGroup.Run(r.ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ruleSetStartContext.Close()
|
||||
}
|
||||
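Rule-sets are started through sing's `task.Group` with a concurrency cap of five and fast-fail semantics, so one failed remote download aborts startup early. The sketch below re-expresses just that scheduling pattern with the calls used in the hunk above; the `startAll` wrapper itself is invented.

```go
package route_example

import (
	"context"

	E "github.com/sagernet/sing/common/exceptions"
	"github.com/sagernet/sing/common/task"
)

// startAll runs one start function per rule-set, at most five at a time, and
// returns the first error wrapped with the rule-set index.
func startAll(ctx context.Context, starters []func(ctx context.Context) error) error {
	var group task.Group
	for i, start := range starters {
		i, start := i, start // capture loop variables for the closure
		group.Append0(func(ctx context.Context) error {
			if err := start(ctx); err != nil {
				return E.Cause(err, "initialize rule-set[", i, "]")
			}
			return nil
		})
	}
	group.Concurrency(5)
	group.FastFail()
	return group.Run(ctx)
}
```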
for i, rule := range r.rules {
|
||||
err := rule.Start()
|
||||
if err != nil {
|
||||
@ -491,12 +533,6 @@ func (r *Router) Start() error {
|
||||
return E.Cause(err, "initialize DNS rule[", i, "]")
|
||||
}
|
||||
}
|
||||
if r.fakeIPStore != nil {
|
||||
err := r.fakeIPStore.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for i, transport := range r.transports {
|
||||
err := transport.Start()
|
||||
if err != nil {
|
||||
@ -512,6 +548,18 @@ func (r *Router) Start() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Router) PostStart() error {
|
||||
if len(r.ruleSets) > 0 {
|
||||
for i, ruleSet := range r.ruleSets {
|
||||
err := ruleSet.PostStart()
|
||||
if err != nil {
|
||||
return E.Cause(err, "post start rule-set[", i, "]")
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Router) Close() error {
|
||||
var err error
|
||||
for i, rule := range r.rules {
|
||||
@ -576,11 +624,17 @@ func (r *Router) Outbound(tag string) (adapter.Outbound, bool) {
|
||||
return outbound, loaded
|
||||
}
|
||||
|
||||
func (r *Router) DefaultOutbound(network string) adapter.Outbound {
|
||||
func (r *Router) DefaultOutbound(network string) (adapter.Outbound, error) {
|
||||
if network == N.NetworkTCP {
|
||||
return r.defaultOutboundForConnection
|
||||
if r.defaultOutboundForConnection == nil {
|
||||
return nil, E.New("missing default outbound for TCP connections")
|
||||
}
|
||||
return r.defaultOutboundForConnection, nil
|
||||
} else {
|
||||
return r.defaultOutboundForPacketConnection
|
||||
if r.defaultOutboundForPacketConnection == nil {
|
||||
return nil, E.New("missing default outbound for UDP connections")
|
||||
}
|
||||
return r.defaultOutboundForPacketConnection, nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -588,6 +642,11 @@ func (r *Router) FakeIPStore() adapter.FakeIPStore {
|
||||
return r.fakeIPStore
|
||||
}
|
||||
|
||||
func (r *Router) RuleSet(tag string) (adapter.RuleSet, bool) {
|
||||
ruleSet, loaded := r.ruleSetMap[tag]
|
||||
return ruleSet, loaded
|
||||
}
|
||||
|
||||
func (r *Router) RouteConnection(ctx context.Context, conn net.Conn, metadata adapter.InboundContext) error {
|
||||
if metadata.InboundDetour != "" {
|
||||
if metadata.LastInbound == metadata.InboundDetour {
|
||||
@ -646,7 +705,6 @@ func (r *Router) RouteConnection(ctx context.Context, conn net.Conn, metadata ad
|
||||
|
||||
if metadata.InboundOptions.SniffEnabled {
|
||||
buffer := buf.NewPacket()
|
||||
buffer.FullReset()
|
||||
sniffMetadata, err := sniff.PeekStream(ctx, conn, buffer, time.Duration(metadata.InboundOptions.SniffTimeout), sniff.StreamDomainNameQuery, sniff.TLSClientHello, sniff.HTTPHost)
|
||||
if sniffMetadata != nil {
|
||||
metadata.Protocol = sniffMetadata.Protocol
|
||||
@ -762,7 +820,6 @@ func (r *Router) RoutePacketConnection(ctx context.Context, conn N.PacketConn, m
|
||||
|
||||
if metadata.InboundOptions.SniffEnabled || metadata.Destination.Addr.IsUnspecified() {
|
||||
buffer := buf.NewPacket()
|
||||
buffer.FullReset()
|
||||
destination, err := conn.ReadPacket(buffer)
|
||||
if err != nil {
|
||||
buffer.Release()
|
||||
@ -878,6 +935,7 @@ func (r *Router) match0(ctx context.Context, metadata *adapter.InboundContext, d
|
||||
}
|
||||
}
|
||||
for i, rule := range r.rules {
|
||||
metadata.ResetRuleCache()
|
||||
if rule.Match(metadata) {
|
||||
detour := rule.Outbound()
|
||||
r.logger.DebugContext(ctx, "match[", i, "] ", rule.String(), " => ", detour)
|
||||
|
@ -37,12 +37,13 @@ func (m *DNSReverseMapping) Query(address netip.Addr) (string, bool) {
|
||||
return domain, loaded
|
||||
}
|
||||
|
||||
func (r *Router) matchDNS(ctx context.Context) (context.Context, dns.Transport, dns.DomainStrategy) {
|
||||
func (r *Router) matchDNS(ctx context.Context, allowFakeIP bool) (context.Context, dns.Transport, dns.DomainStrategy) {
|
||||
metadata := adapter.ContextFrom(ctx)
|
||||
if metadata == nil {
|
||||
panic("no context")
|
||||
}
|
||||
for i, rule := range r.dnsRules {
|
||||
metadata.ResetRuleCache()
|
||||
if rule.Match(metadata) {
|
||||
detour := rule.Outbound()
|
||||
transport, loaded := r.transportMap[detour]
|
||||
@ -50,7 +51,7 @@ func (r *Router) matchDNS(ctx context.Context) (context.Context, dns.Transport,
|
||||
r.dnsLogger.ErrorContext(ctx, "transport not found: ", detour)
|
||||
continue
|
||||
}
|
||||
if _, isFakeIP := transport.(adapter.FakeIPTransport); isFakeIP && metadata.FakeIP {
|
||||
if _, isFakeIP := transport.(adapter.FakeIPTransport); isFakeIP && !allowFakeIP {
|
||||
continue
|
||||
}
|
||||
r.dnsLogger.DebugContext(ctx, "match[", i, "] ", rule.String(), " => ", detour)
|
||||
@ -96,7 +97,7 @@ func (r *Router) Exchange(ctx context.Context, message *mDNS.Msg) (*mDNS.Msg, er
|
||||
}
|
||||
metadata.Domain = fqdnToDomain(message.Question[0].Name)
|
||||
}
|
||||
ctx, transport, strategy := r.matchDNS(ctx)
|
||||
ctx, transport, strategy := r.matchDNS(ctx, true)
|
||||
ctx, cancel := context.WithTimeout(ctx, C.DNSTimeout)
|
||||
defer cancel()
|
||||
response, err = r.dnsClient.Exchange(ctx, transport, message, strategy)
|
||||
@ -124,7 +125,7 @@ func (r *Router) Lookup(ctx context.Context, domain string, strategy dns.DomainS
|
||||
r.dnsLogger.DebugContext(ctx, "lookup domain ", domain)
|
||||
ctx, metadata := adapter.AppendContext(ctx)
|
||||
metadata.Domain = domain
|
||||
ctx, transport, transportStrategy := r.matchDNS(ctx)
|
||||
ctx, transport, transportStrategy := r.matchDNS(ctx, false)
|
||||
if strategy == dns.DomainStrategyAsIS {
|
||||
strategy = transportStrategy
|
||||
}
|
||||
|
@ -252,10 +252,8 @@ func hasRule(rules []option.Rule, cond func(rule option.DefaultRule) bool) bool
|
||||
return true
|
||||
}
|
||||
case C.RuleTypeLogical:
|
||||
for _, subRule := range rule.LogicalOptions.Rules {
|
||||
if cond(subRule) {
|
||||
return true
|
||||
}
|
||||
if hasRule(rule.LogicalOptions.Rules, cond) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -270,10 +268,8 @@ func hasDNSRule(rules []option.DNSRule, cond func(rule option.DefaultDNSRule) bo
|
||||
return true
|
||||
}
|
||||
case C.RuleTypeLogical:
|
||||
for _, subRule := range rule.LogicalOptions.Rules {
|
||||
if cond(subRule) {
|
||||
return true
|
||||
}
|
||||
if hasDNSRule(rule.LogicalOptions.Rules, cond) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
package route

import (
"io"
"strings"

"github.com/sagernet/sing-box/adapter"
@ -16,6 +17,7 @@ type abstractDefaultRule struct {
destinationAddressItems []RuleItem
destinationPortItems []RuleItem
allItems []RuleItem
ruleSetItem RuleItem
invert bool
outbound string
}
@ -61,62 +63,62 @@ func (r *abstractDefaultRule) Match(metadata *adapter.InboundContext) bool {
return true
}

if len(r.sourceAddressItems) > 0 && !metadata.SourceAddressMatch {
for _, item := range r.sourceAddressItems {
if item.Match(metadata) {
metadata.SourceAddressMatch = true
break
}
}
}

if len(r.sourcePortItems) > 0 && !metadata.SourceAddressMatch {
for _, item := range r.sourcePortItems {
if item.Match(metadata) {
metadata.SourcePortMatch = true
break
}
}
}

if len(r.destinationAddressItems) > 0 && !metadata.SourceAddressMatch {
for _, item := range r.destinationAddressItems {
if item.Match(metadata) {
metadata.DestinationAddressMatch = true
break
}
}
}

if len(r.destinationPortItems) > 0 && !metadata.SourceAddressMatch {
for _, item := range r.destinationPortItems {
if item.Match(metadata) {
metadata.DestinationPortMatch = true
break
}
}
}

for _, item := range r.items {
if !item.Match(metadata) {
return r.invert
}
}

if len(r.sourceAddressItems) > 0 {
var sourceAddressMatch bool
for _, item := range r.sourceAddressItems {
if item.Match(metadata) {
sourceAddressMatch = true
break
}
}
if !sourceAddressMatch {
return r.invert
}
if len(r.sourceAddressItems) > 0 && !metadata.SourceAddressMatch {
return r.invert
}

if len(r.sourcePortItems) > 0 {
var sourcePortMatch bool
for _, item := range r.sourcePortItems {
if item.Match(metadata) {
sourcePortMatch = true
break
}
}
if !sourcePortMatch {
return r.invert
}
if len(r.sourcePortItems) > 0 && !metadata.SourcePortMatch {
return r.invert
}

if len(r.destinationAddressItems) > 0 {
var destinationAddressMatch bool
for _, item := range r.destinationAddressItems {
if item.Match(metadata) {
destinationAddressMatch = true
break
}
}
if !destinationAddressMatch {
return r.invert
}
if len(r.destinationAddressItems) > 0 && !metadata.DestinationAddressMatch {
return r.invert
}

if len(r.destinationPortItems) > 0 {
var destinationPortMatch bool
for _, item := range r.destinationPortItems {
if item.Match(metadata) {
destinationPortMatch = true
break
}
}
if !destinationPortMatch {
return r.invert
}
if len(r.destinationPortItems) > 0 && !metadata.DestinationPortMatch {
return r.invert
}

return !r.invert
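The rewritten Match groups its condition items by category, records the first hit of each group in flags on the inbound context, and then fails fast if any non-empty group did not match; because the flags live on the shared metadata, a group that already matched is not re-evaluated (the logical rules reset the cache explicitly). A simplified sketch of that OR-within-group / AND-across-groups shape, with hypothetical item and context types rather than RuleItem and adapter.InboundContext:

package main

import "fmt"

// Hypothetical stand-ins for RuleItem and adapter.InboundContext.
type Item func(domain string, port uint16) bool

type Context struct {
	Domain string
	Port   uint16

	// Cached per-group results, analogous to SourceAddressMatch / DestinationPortMatch above.
	DomainMatch bool
	PortMatch   bool
}

// matchGroup marks the group as matched if any of its items hit (OR semantics).
func matchGroup(items []Item, cached *bool, ctx *Context) {
	if len(items) == 0 || *cached {
		return
	}
	for _, item := range items {
		if item(ctx.Domain, ctx.Port) {
			*cached = true
			break
		}
	}
}

// match ANDs the groups: every non-empty group must have at least one hit.
func match(domainItems, portItems []Item, ctx *Context) bool {
	matchGroup(domainItems, &ctx.DomainMatch, ctx)
	matchGroup(portItems, &ctx.PortMatch, ctx)
	if len(domainItems) > 0 && !ctx.DomainMatch {
		return false
	}
	if len(portItems) > 0 && !ctx.PortMatch {
		return false
	}
	return true
}

func main() {
	ctx := &Context{Domain: "example.com", Port: 443}
	domainItems := []Item{func(d string, _ uint16) bool { return d == "example.com" }}
	portItems := []Item{func(_ string, p uint16) bool { return p == 80 }}
	fmt.Println(match(domainItems, portItems, ctx)) // false: the port group has no hit
}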
@ -135,7 +137,7 @@ func (r *abstractDefaultRule) String() string {
}

type abstractLogicalRule struct {
rules []adapter.Rule
rules []adapter.HeadlessRule
mode string
invert bool
outbound string
@ -146,7 +148,10 @@ func (r *abstractLogicalRule) Type() string {
}

func (r *abstractLogicalRule) UpdateGeosite() error {
for _, rule := range r.rules {
for _, rule := range common.FilterIsInstance(r.rules, func(it adapter.HeadlessRule) (adapter.Rule, bool) {
rule, loaded := it.(adapter.Rule)
return rule, loaded
}) {
err := rule.UpdateGeosite()
if err != nil {
return err
@ -156,7 +161,10 @@ func (r *abstractLogicalRule) UpdateGeosite() error {
}

func (r *abstractLogicalRule) Start() error {
for _, rule := range r.rules {
for _, rule := range common.FilterIsInstance(r.rules, func(it adapter.HeadlessRule) (common.Starter, bool) {
rule, loaded := it.(common.Starter)
return rule, loaded
}) {
err := rule.Start()
if err != nil {
return err
@ -166,7 +174,10 @@ func (r *abstractLogicalRule) Start() error {
}

func (r *abstractLogicalRule) Close() error {
for _, rule := range r.rules {
for _, rule := range common.FilterIsInstance(r.rules, func(it adapter.HeadlessRule) (io.Closer, bool) {
rule, loaded := it.(io.Closer)
return rule, loaded
}) {
err := rule.Close()
if err != nil {
return err
@ -177,11 +188,13 @@ func (r *abstractLogicalRule) Close() error {

func (r *abstractLogicalRule) Match(metadata *adapter.InboundContext) bool {
if r.mode == C.LogicalTypeAnd {
return common.All(r.rules, func(it adapter.Rule) bool {
return common.All(r.rules, func(it adapter.HeadlessRule) bool {
metadata.ResetRuleCache()
return it.Match(metadata)
}) != r.invert
} else {
return common.Any(r.rules, func(it adapter.Rule) bool {
return common.Any(r.rules, func(it adapter.HeadlessRule) bool {
metadata.ResetRuleCache()
return it.Match(metadata)
}) != r.invert
}
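With the rule slice widened to []adapter.HeadlessRule, the lifecycle methods above only operate on the elements that actually implement the narrower interface, selected through common.FilterIsInstance. A self-contained sketch of the same filter written with Go generics; the helper below is hypothetical and not sing's implementation:

package main

import (
	"fmt"
	"io"
	"strings"
)

// filterIsInstance keeps the elements of from that convert to T via check,
// in the spirit of common.FilterIsInstance used above.
func filterIsInstance[F any, T any](from []F, check func(F) (T, bool)) []T {
	var result []T
	for _, it := range from {
		if converted, ok := check(it); ok {
			result = append(result, converted)
		}
	}
	return result
}

func main() {
	values := []any{strings.NewReader("not a closer"), io.NopCloser(strings.NewReader("a closer"))}
	closers := filterIsInstance(values, func(it any) (io.Closer, bool) {
		closer, ok := it.(io.Closer)
		return closer, ok
	})
	for _, closer := range closers {
		fmt.Println(closer.Close()) // <nil>: only the io.Closer element survived the filter
	}
}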
@ -8,13 +8,13 @@ import (
E "github.com/sagernet/sing/common/exceptions"
)

func NewRule(router adapter.Router, logger log.ContextLogger, options option.Rule) (adapter.Rule, error) {
func NewRule(router adapter.Router, logger log.ContextLogger, options option.Rule, checkOutbound bool) (adapter.Rule, error) {
switch options.Type {
case "", C.RuleTypeDefault:
if !options.DefaultOptions.IsValid() {
return nil, E.New("missing conditions")
}
if options.DefaultOptions.Outbound == "" {
if options.DefaultOptions.Outbound == "" && checkOutbound {
return nil, E.New("missing outbound field")
}
return NewDefaultRule(router, logger, options.DefaultOptions)
@ -22,7 +22,7 @@ func NewRule(router adapter.Router, logger log.ContextLogger, options option.Rul
if !options.LogicalOptions.IsValid() {
return nil, E.New("missing conditions")
}
if options.LogicalOptions.Outbound == "" {
if options.LogicalOptions.Outbound == "" && checkOutbound {
return nil, E.New("missing outbound field")
}
return NewLogicalRule(router, logger, options.LogicalOptions)
@ -120,6 +125,11 @@ func NewDefaultRule(router adapter.Router, logger log.ContextLogger, options opt
rule.sourceAddressItems = append(rule.sourceAddressItems, item)
rule.allItems = append(rule.allItems, item)
}
if options.SourceIPIsPrivate {
item := NewIPIsPrivateItem(true)
rule.sourceAddressItems = append(rule.sourceAddressItems, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.IPCIDR) > 0 {
item, err := NewIPCIDRItem(false, options.IPCIDR)
if err != nil {
@ -128,6 +133,11 @@ func NewDefaultRule(router adapter.Router, logger log.ContextLogger, options opt
rule.destinationAddressItems = append(rule.destinationAddressItems, item)
rule.allItems = append(rule.allItems, item)
}
if options.IPIsPrivate {
item := NewIPIsPrivateItem(false)
rule.destinationAddressItems = append(rule.destinationAddressItems, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.SourcePort) > 0 {
item := NewPortItem(true, options.SourcePort)
rule.sourcePortItems = append(rule.sourcePortItems, item)
@ -194,6 +204,11 @@ func NewDefaultRule(router adapter.Router, logger log.ContextLogger, options opt
rule.items = append(rule.items, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.RuleSet) > 0 {
item := NewRuleSetItem(router, options.RuleSet, options.RuleSetIPCIDRMatchSource)
rule.items = append(rule.items, item)
rule.allItems = append(rule.allItems, item)
}
return rule, nil
}

@ -206,7 +221,7 @@ type LogicalRule struct {
func NewLogicalRule(router adapter.Router, logger log.ContextLogger, options option.LogicalRule) (*LogicalRule, error) {
r := &LogicalRule{
abstractLogicalRule{
rules: make([]adapter.Rule, len(options.Rules)),
rules: make([]adapter.HeadlessRule, len(options.Rules)),
invert: options.Invert,
outbound: options.Outbound,
},
@ -220,7 +235,7 @@ func NewLogicalRule(router adapter.Router, logger log.ContextLogger, options opt
return nil, E.New("unknown logical mode: ", options.Mode)
}
for i, subRule := range options.Rules {
rule, err := NewDefaultRule(router, logger, subRule)
rule, err := NewRule(router, logger, subRule, false)
if err != nil {
return nil, E.Cause(err, "sub rule[", i, "]")
}
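The new source_ip_is_private / ip_is_private options are wired up through NewIPIsPrivateItem above. The Go standard library already classifies the relevant ranges; a minimal sketch of such a check with net/netip (how the sing-box item decides this internally is not shown in the diff):

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	for _, raw := range []string{"192.168.1.10", "10.0.0.1", "1.1.1.1", "fd00::1"} {
		addr := netip.MustParseAddr(raw)
		// IsPrivate covers the RFC 1918 IPv4 ranges and RFC 4193 unique local IPv6 addresses.
		fmt.Printf("%s private=%v loopback=%v\n", addr, addr.IsPrivate(), addr.IsLoopback())
	}
}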
@ -8,13 +8,13 @@ import (
E "github.com/sagernet/sing/common/exceptions"
)

func NewDNSRule(router adapter.Router, logger log.ContextLogger, options option.DNSRule) (adapter.DNSRule, error) {
func NewDNSRule(router adapter.Router, logger log.ContextLogger, options option.DNSRule, checkServer bool) (adapter.DNSRule, error) {
switch options.Type {
case "", C.RuleTypeDefault:
if !options.DefaultOptions.IsValid() {
return nil, E.New("missing conditions")
}
if options.DefaultOptions.Server == "" {
if options.DefaultOptions.Server == "" && checkServer {
return nil, E.New("missing server field")
}
return NewDefaultDNSRule(router, logger, options.DefaultOptions)
@ -22,7 +22,7 @@ func NewDNSRule(router adapter.Router, logger log.ContextLogger, options option.
if !options.LogicalOptions.IsValid() {
return nil, E.New("missing conditions")
}
if options.LogicalOptions.Server == "" {
if options.LogicalOptions.Server == "" && checkServer {
return nil, E.New("missing server field")
}
return NewLogicalDNSRule(router, logger, options.LogicalOptions)
@ -119,6 +119,11 @@ func NewDefaultDNSRule(router adapter.Router, logger log.ContextLogger, options
rule.sourceAddressItems = append(rule.sourceAddressItems, item)
rule.allItems = append(rule.allItems, item)
}
if options.SourceIPIsPrivate {
item := NewIPIsPrivateItem(true)
rule.sourceAddressItems = append(rule.sourceAddressItems, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.SourcePort) > 0 {
item := NewPortItem(true, options.SourcePort)
rule.sourcePortItems = append(rule.sourcePortItems, item)
@ -190,6 +195,11 @@ func NewDefaultDNSRule(router adapter.Router, logger log.ContextLogger, options
rule.items = append(rule.items, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.RuleSet) > 0 {
item := NewRuleSetItem(router, options.RuleSet, false)
rule.items = append(rule.items, item)
rule.allItems = append(rule.allItems, item)
}
return rule, nil
}

@ -212,7 +222,7 @@ type LogicalDNSRule struct {
func NewLogicalDNSRule(router adapter.Router, logger log.ContextLogger, options option.LogicalDNSRule) (*LogicalDNSRule, error) {
r := &LogicalDNSRule{
abstractLogicalRule: abstractLogicalRule{
rules: make([]adapter.Rule, len(options.Rules)),
rules: make([]adapter.HeadlessRule, len(options.Rules)),
invert: options.Invert,
outbound: options.Server,
},
@ -228,7 +238,7 @@ func NewLogicalDNSRule(router adapter.Router, logger log.ContextLogger, options
return nil, E.New("unknown logical mode: ", options.Mode)
}
for i, subRule := range options.Rules {
rule, err := NewDefaultDNSRule(router, logger, subRule)
rule, err := NewDNSRule(router, logger, subRule, false)
if err != nil {
return nil, E.Cause(err, "sub rule[", i, "]")
}
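NewRule and NewDNSRule now take a checkOutbound / checkServer flag, so a sub-rule of a logical rule may omit its own outbound or server and rely on the parent's; only top-level rules still have to name one. A small sketch of that validation pattern with hypothetical, simplified option types (not sing-box's option structs):

package main

import (
	"errors"
	"fmt"
)

type RuleOptions struct {
	Domain   []string
	Outbound string
	SubRules []RuleOptions
}

// newRule validates the outbound only when requested, mirroring the
// checkOutbound parameter added above: sub-rules are validated with false.
func newRule(options RuleOptions, checkOutbound bool) error {
	if len(options.Domain) == 0 && len(options.SubRules) == 0 {
		return errors.New("missing conditions")
	}
	if options.Outbound == "" && checkOutbound {
		return errors.New("missing outbound field")
	}
	for i, sub := range options.SubRules {
		if err := newRule(sub, false); err != nil {
			return fmt.Errorf("sub rule[%d]: %w", i, err)
		}
	}
	return nil
}

func main() {
	logical := RuleOptions{
		Outbound: "proxy",
		SubRules: []RuleOptions{
			{Domain: []string{"example.com"}}, // no outbound needed on the sub-rule
		},
	}
	fmt.Println(newRule(logical, true)) // <nil>
}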
173
route/rule_headless.go
Normal file
@ -0,0 +1,173 @@
package route

import (
"github.com/sagernet/sing-box/adapter"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/option"
E "github.com/sagernet/sing/common/exceptions"
)

func NewHeadlessRule(router adapter.Router, options option.HeadlessRule) (adapter.HeadlessRule, error) {
switch options.Type {
case "", C.RuleTypeDefault:
if !options.DefaultOptions.IsValid() {
return nil, E.New("missing conditions")
}
return NewDefaultHeadlessRule(router, options.DefaultOptions)
case C.RuleTypeLogical:
if !options.LogicalOptions.IsValid() {
return nil, E.New("missing conditions")
}
return NewLogicalHeadlessRule(router, options.LogicalOptions)
default:
return nil, E.New("unknown rule type: ", options.Type)
}
}

var _ adapter.HeadlessRule = (*DefaultHeadlessRule)(nil)

type DefaultHeadlessRule struct {
abstractDefaultRule
}

func NewDefaultHeadlessRule(router adapter.Router, options option.DefaultHeadlessRule) (*DefaultHeadlessRule, error) {
rule := &DefaultHeadlessRule{
abstractDefaultRule{
invert: options.Invert,
},
}
if len(options.Network) > 0 {
item := NewNetworkItem(options.Network)
rule.items = append(rule.items, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.Domain) > 0 || len(options.DomainSuffix) > 0 {
item := NewDomainItem(options.Domain, options.DomainSuffix)
rule.destinationAddressItems = append(rule.destinationAddressItems, item)
rule.allItems = append(rule.allItems, item)
} else if options.DomainMatcher != nil {
item := NewRawDomainItem(options.DomainMatcher)
rule.destinationAddressItems = append(rule.destinationAddressItems, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.DomainKeyword) > 0 {
item := NewDomainKeywordItem(options.DomainKeyword)
rule.destinationAddressItems = append(rule.destinationAddressItems, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.DomainRegex) > 0 {
item, err := NewDomainRegexItem(options.DomainRegex)
if err != nil {
return nil, E.Cause(err, "domain_regex")
}
rule.destinationAddressItems = append(rule.destinationAddressItems, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.SourceIPCIDR) > 0 {
item, err := NewIPCIDRItem(true, options.SourceIPCIDR)
if err != nil {
return nil, E.Cause(err, "source_ipcidr")
}
rule.sourceAddressItems = append(rule.sourceAddressItems, item)
rule.allItems = append(rule.allItems, item)
} else if options.SourceIPSet != nil {
item := NewRawIPCIDRItem(true, options.SourceIPSet)
rule.sourceAddressItems = append(rule.sourceAddressItems, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.IPCIDR) > 0 {
item, err := NewIPCIDRItem(false, options.IPCIDR)
if err != nil {
return nil, E.Cause(err, "ipcidr")
}
rule.destinationAddressItems = append(rule.destinationAddressItems, item)
rule.allItems = append(rule.allItems, item)
} else if options.IPSet != nil {
item := NewRawIPCIDRItem(false, options.IPSet)
rule.destinationAddressItems = append(rule.destinationAddressItems, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.SourcePort) > 0 {
item := NewPortItem(true, options.SourcePort)
rule.sourcePortItems = append(rule.sourcePortItems, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.SourcePortRange) > 0 {
item, err := NewPortRangeItem(true, options.SourcePortRange)
if err != nil {
return nil, E.Cause(err, "source_port_range")
}
rule.sourcePortItems = append(rule.sourcePortItems, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.Port) > 0 {
item := NewPortItem(false, options.Port)
rule.destinationPortItems = append(rule.destinationPortItems, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.PortRange) > 0 {
item, err := NewPortRangeItem(false, options.PortRange)
if err != nil {
return nil, E.Cause(err, "port_range")
}
rule.destinationPortItems = append(rule.destinationPortItems, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.ProcessName) > 0 {
item := NewProcessItem(options.ProcessName)
rule.items = append(rule.items, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.ProcessPath) > 0 {
item := NewProcessPathItem(options.ProcessPath)
rule.items = append(rule.items, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.PackageName) > 0 {
item := NewPackageNameItem(options.PackageName)
rule.items = append(rule.items, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.WIFISSID) > 0 {
item := NewWIFISSIDItem(router, options.WIFISSID)
rule.items = append(rule.items, item)
rule.allItems = append(rule.allItems, item)
}
if len(options.WIFIBSSID) > 0 {
item := NewWIFIBSSIDItem(router, options.WIFIBSSID)
rule.items = append(rule.items, item)
rule.allItems = append(rule.allItems, item)
}
return rule, nil
}

var _ adapter.HeadlessRule = (*LogicalHeadlessRule)(nil)

type LogicalHeadlessRule struct {
abstractLogicalRule
}

func NewLogicalHeadlessRule(router adapter.Router, options option.LogicalHeadlessRule) (*LogicalHeadlessRule, error) {
r := &LogicalHeadlessRule{
abstractLogicalRule{
rules: make([]adapter.HeadlessRule, len(options.Rules)),
invert: options.Invert,
},
}
switch options.Mode {
case C.LogicalTypeAnd:
r.mode = C.LogicalTypeAnd
case C.LogicalTypeOr:
r.mode = C.LogicalTypeOr
default:
return nil, E.New("unknown logical mode: ", options.Mode)
}
for i, subRule := range options.Rules {
rule, err := NewHeadlessRule(router, subRule)
if err != nil {
return nil, E.Cause(err, "sub rule[", i, "]")
}
r.rules[i] = rule
}
return r, nil
}
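The new rule_headless.go builds rules that carry only match conditions, with no outbound or server, which is what a rule-set entry needs. A self-contained sketch of that idea with simplified, hypothetical types (the real constructors and option fields are the ones shown in the file above):

package main

import (
	"fmt"
	"strings"
)

// headlessRule matches traffic but, unlike a routing rule, carries no outbound.
type headlessRule interface {
	Match(domain string, port uint16) bool
}

type defaultHeadlessRule struct {
	domainSuffix []string
	ports        []uint16
}

var _ headlessRule = (*defaultHeadlessRule)(nil)

// Match ANDs the condition groups; within a group any entry is enough.
func (r *defaultHeadlessRule) Match(domain string, port uint16) bool {
	if len(r.domainSuffix) > 0 {
		matched := false
		for _, suffix := range r.domainSuffix {
			if strings.HasSuffix(domain, suffix) {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	if len(r.ports) > 0 {
		matched := false
		for _, p := range r.ports {
			if p == port {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}

func main() {
	rule := &defaultHeadlessRule{domainSuffix: []string{".example.com"}, ports: []uint16{443}}
	fmt.Println(rule.Match("www.example.com", 443)) // true
	fmt.Println(rule.Match("www.example.com", 80))  // false
}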
@ -31,7 +31,7 @@ func NewIPCIDRItem(isSource bool, prefixStrings []string) (*IPCIDRItem, error) {
builder.Add(addr)
continue
}
return nil, E.Cause(err, "parse ip_cidr [", i, "]")
return nil, E.Cause(err, "parse [", i, "]")
}
var description string
if isSource {
@ -57,8 +57,23 @@ func NewIPCIDRItem(isSource bool, prefixStrings []string) (*IPCIDRItem, error) {
}, nil
}

func NewRawIPCIDRItem(isSource bool, ipSet *netipx.IPSet) *IPCIDRItem {
var description string
if isSource {
description = "source_ipcidr="
} else {
description = "ipcidr="
}
description += "<binary>"
return &IPCIDRItem{
ipSet: ipSet,
isSource: isSource,
description: description,
}
}

func (r *IPCIDRItem) Match(metadata *adapter.InboundContext) bool {
if r.isSource {
if r.isSource || metadata.QueryType != 0 || metadata.IPCIDRMatchSource {
return r.ipSet.Contains(metadata.Source.Addr)
} else {
if metadata.Destination.IsIP() {
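NewRawIPCIDRItem wraps a pre-built *netipx.IPSet directly, which is what a compiled binary rule set can hand over instead of a list of CIDR strings. A minimal sketch of building and querying such a set with go4.org/netipx; the prefixes and addresses below are arbitrary examples:

package main

import (
	"fmt"
	"net/netip"

	"go4.org/netipx"
)

func main() {
	var builder netipx.IPSetBuilder
	builder.AddPrefix(netip.MustParsePrefix("10.0.0.0/8"))
	builder.AddPrefix(netip.MustParsePrefix("2001:db8::/32"))

	ipSet, err := builder.IPSet() // compiles the builder into an immutable set
	if err != nil {
		panic(err)
	}

	fmt.Println(ipSet.Contains(netip.MustParseAddr("10.1.2.3")))    // true
	fmt.Println(ipSet.Contains(netip.MustParseAddr("192.0.2.1")))   // false
	fmt.Println(ipSet.Contains(netip.MustParseAddr("2001:db8::1"))) // true
}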
@ -43,6 +43,13 @@ func NewDomainItem(domains []string, domainSuffixes []string) *DomainItem {
}
}

func NewRawDomainItem(matcher *domain.Matcher) *DomainItem {
return &DomainItem{
matcher,
"domain/domain_suffix=<binary>",
}
}

func (r *DomainItem) Match(metadata *adapter.InboundContext) bool {
var domainHost string
if metadata.Domain != "" {
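NewRawDomainItem likewise accepts a pre-built matcher (sing's domain.Matcher, as serialized in binary rule sets) instead of raw domain and domain_suffix lists. Its exact API is not shown here, so the sketch below only illustrates the exact-plus-suffix matching a plain DomainItem performs, using the standard library; the function name is hypothetical:

package main

import (
	"fmt"
	"strings"
)

// matchDomain reports whether host equals one of the domains or ends with one of the suffixes.
func matchDomain(host string, domains, suffixes []string) bool {
	for _, d := range domains {
		if host == d {
			return true
		}
	}
	for _, s := range suffixes {
		if strings.HasSuffix(host, s) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(matchDomain("www.example.com", nil, []string{".example.com"})) // true
	fmt.Println(matchDomain("example.org", []string{"example.com"}, nil))      // false
}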
Some files were not shown because too many files have changed in this diff.