Loading...
Loading...
Generate LLM skills from documentation, codebases, and GitHub repositories
npx skill4agent add bahayonghang/my-claude-code-settings skill-seekers

pip install skill-seekers
# Or: uv pip install skill-seekers

| Source | Command |
|---|---|
| Local code | |
| Docs URL | |
| GitHub | |
|
# Analyze local codebase
skill-seekers-codebase --directory /path/to/project --output output/my-skill/
# Package for Claude
yes | skill-seekers package output/my-skill/ --no-open

| Flag | Description |
|---|---|
| Analysis depth |
| Skip pattern detection |
| Skip test extraction |
| AI enhancement |
# Test: nx.full_join on undirected multigraphs.
# full_join(G, H) takes the disjoint union of G and H and then adds one
# edge between every cross pair (g, h), so the result has
# |E(G)| + |E(H)| + |G| * |H| edges.
# NOTE(review): the first line below was fused with an extraction-artifact
# filesystem path ("/home/lyh/Documents/Skill_Seekers...references/patterns/");
# the path residue has been removed, only the code remains.
G = nx.MultiGraph()
G.add_node(0)
G.add_edge(1, 2)  # implicitly adds nodes 1 and 2
H = nx.MultiGraph()
H.add_edge(3, 4)  # implicitly adds nodes 3 and 4

U = nx.full_join(G, H)
assert set(U) == set(G) | set(H)
assert len(U) == len(G) + len(H)
assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H)

# With rename=(p1, p2), nodes are relabelled with the given per-graph prefixes.
U = nx.full_join(G, H, rename=('g', 'h'))
assert set(U) == {'g0', 'g1', 'g2', 'h3', 'h4'}
assert len(U) == len(G) + len(H)
assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H)
# Test: nx.full_join on directed multigraphs.  For directed inputs the
# join adds edges in BOTH directions between every cross pair, hence the
# extra len(G) * len(H) * 2 term in the edge count.
G = nx.MultiDiGraph()
G.add_node(0)
G.add_edge(1, 2)  # implicitly adds nodes 1 and 2
H = nx.MultiDiGraph()
H.add_edge(3, 4)  # implicitly adds nodes 3 and 4

U = nx.full_join(G, H)
assert set(U) == set(G) | set(H)
assert len(U) == len(G) + len(H)
assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2

# Same checks with per-graph node-label prefixes.
U = nx.full_join(G, H, rename=('g', 'h'))
assert set(U) == {'g0', 'g1', 'g2', 'h3', 'h4'}
assert len(U) == len(G) + len(H)
assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2

# NOTE(review): the two statements below were fused onto the preceding
# assert by extraction.  They belong to unrelated test fragments (an
# adjacency-matrix DataFrame and a planar-embedding check) and reference
# names ('pd', 'embedding_expected') defined elsewhere in the original
# sources — de-fused here, left otherwise unchanged.
df = pd.DataFrame([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]],
                  index=[1010001, 2, 1, 1010002],
                  columns=[1010001, 2, 1, 1010002])
embedding_expected.set_data({1: [2, 7], 2: [1, 3, 4, 5], 3: [2, 4],
                             4: [3, 6, 2], 5: [7, 2], 6: [4, 7],
                             7: [6, 1, 5]})
# NOTE(review): the statements below were fused onto a handful of physical
# lines by extraction.  They are disjoint fragments from several different
# tests (planar embedding equality, graph construction, bipartite centrality
# expectations, isomorphism checks).  They have been de-fused one statement
# per line; names such as 'embedding', 'embedding_expected', and 'J' are
# defined elsewhere in the original sources, and the two 'answer' dicts are
# consumed by assertions not visible here.  Trailing extraction residue
# ("references/test_examples/..." path fragments) has been removed.
assert nx.utils.graphs_equal(embedding, embedding_expected)

G = nx.Graph([(3, 10), (2, 13), (1, 13), (7, 11), (0, 8), (8, 13),
              (0, 2), (0, 7), (0, 10), (1, 7)])

G = nx.Graph([(1, 2), (4, 13), (0, 13), (4, 5), (7, 10), (1, 7), (0, 3),
              (2, 6), (5, 6), (7, 13), (4, 8), (0, 8), (0, 9), (2, 13),
              (6, 7), (3, 6), (2, 8)])

answer = {'Laura Mandeville': 0.07, 'Olivia Carleton': 0.04, 'Frances Anderson': 0.05, 'Pearl Oglethorpe': 0.04, 'Katherina Rogers': 0.06, 'Flora Price': 0.04, 'Dorothy Murchison': 0.04, 'Helen Lloyd': 0.06, 'Theresa Anderson': 0.07, 'Eleanor Nye': 0.05, 'Evelyn Jefferson': 0.07, 'Sylvia Avondale': 0.07, 'Charlotte McDowd': 0.05, 'Verne Sanderson': 0.05, 'Myra Liddel': 0.05, 'Brenda Rogers': 0.07, 'Ruth DeSand': 0.05, 'Nora Fayette': 0.07, 'E8': 0.11, 'E7': 0.09, 'E10': 0.07, 'E9': 0.1, 'E13': 0.05, 'E3': 0.07, 'E12': 0.07, 'E11': 0.06, 'E2': 0.05, 'E5': 0.08, 'E6': 0.08, 'E14': 0.05, 'E4': 0.06, 'E1': 0.05}

answer = {'Laura Mandeville': 0.29, 'Olivia Carleton': 0.02, 'Frances Anderson': 0.06, 'Pearl Oglethorpe': 0.04, 'Katherina Rogers': 0.04, 'Flora Price': 0.02, 'Dorothy Murchison': 0.03, 'Helen Lloyd': 0.04, 'Theresa Anderson': 0.08, 'Eleanor Nye': 0.05, 'Evelyn Jefferson': 0.09, 'Sylvia Avondale': 0.05, 'Charlotte McDowd': 0.06, 'Verne Sanderson': 0.04, 'Myra Liddel': 0.03, 'Brenda Rogers': 0.08, 'Ruth DeSand': 0.05, 'Nora Fayette': 0.05, 'E8': 0.11, 'E7': 0.1, 'E10': 0.04, 'E9': 0.07, 'E13': 0.03, 'E3': 0.11, 'E12': 0.04, 'E11': 0.03, 'E2': 0.1, 'E5': 0.11, 'E6': 0.1, 'E14': 0.03, 'E4': 0.06, 'E1': 0.1}

# Three isomorphism checks; each extends a (pre-built) graph J and compares
# it against the most recently constructed G.
J.add_edges_from([(('C', 'E'), ('C',)), (('C',), ('A', 'B', 'C')),
                  (('A', 'B', 'C'), ('C',)), (('C',), ('C', 'D'))])
assert nx.is_isomorphic(G, J)

J.add_edges_from([(('A', 'B'), ('B',)), (('B',), ('B', 'C')),
                  (('B', 'C'), ('C',)), (('C',), ('C', 'D'))])
assert nx.is_isomorphic(G, J)

J.add_edges_from([(('A', 'D'), ('A',)), (('A',), ('A', 'C')),
                  (('A', 'C'), ('C',)), (('C',), ('B', 'C')),
                  (('B', 'C'), ('C',)), (('C',), ('C', 'E'))])
assert nx.is_isomorphic(G, J)