
Commit 2121335

Merge remote-tracking branch 'upstream/master' into experts-warmup
2 parents: c8bc6e4 + 84d5475


415 files changed: +53219 -24245 lines changed


.devops/cuda.Dockerfile (+1 -1)

@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=22.04
 # This needs to generally match the container host's environment.
-ARG CUDA_VERSION=12.6.0
+ARG CUDA_VERSION=12.4.0
 # Target the CUDA build image
 ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}

.devops/llama-cpp-cuda.srpm.spec (+2 -2)

@@ -17,10 +17,10 @@ Version: %( date "+%%Y%%m%%d" )
 Release: 1%{?dist}
 Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
 License: MIT
-Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
+Source0: https://github.com/ggml-org/llama.cpp/archive/refs/heads/master.tar.gz
 BuildRequires: coreutils make gcc-c++ git cuda-toolkit
 Requires: cuda-toolkit
-URL: https://github.com/ggerganov/llama.cpp
+URL: https://github.com/ggml-org/llama.cpp

 %define debug_package %{nil}
 %define source_date_epoch_from_changelog 0

.devops/llama-cpp.srpm.spec (+2 -2)

@@ -18,10 +18,10 @@ Version: %( date "+%%Y%%m%%d" )
 Release: 1%{?dist}
 Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
 License: MIT
-Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
+Source0: https://github.com/ggml-org/llama.cpp/archive/refs/heads/master.tar.gz
 BuildRequires: coreutils make gcc-c++ git libstdc++-devel
 Requires: libstdc++
-URL: https://github.com/ggerganov/llama.cpp
+URL: https://github.com/ggml-org/llama.cpp

 %define debug_package %{nil}
 %define source_date_epoch_from_changelog 0
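
In both spec files, the hunk header shows how the Version tag is generated: Version: %( date "+%%Y%%m%%d" ) runs a shell command at RPM build time, with % doubled to %% because % is the RPM macro escape character. The same date-stamp version string in plain Python, for comparison (illustration only):

# Equivalent of the spec's date-based Version string (illustration only).
from datetime import date

print(date.today().strftime("%Y%m%d"))  # e.g. 20250214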

.devops/musa.Dockerfile (+1 -1)

@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=22.04
 # This needs to generally match the container host's environment.
-ARG MUSA_VERSION=rc3.1.0
+ARG MUSA_VERSION=rc3.1.1
 # Target the MUSA build image
 ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}

.devops/nix/package.nix (+3 -3)

@@ -133,12 +133,12 @@ effectiveStdenv.mkDerivation (finalAttrs: {
       --replace '[bundle pathForResource:@"default" ofType:@"metallib"];' "@\"$out/bin/default.metallib\";"
     '';

-    # With PR#6015 https://github.com/ggerganov/llama.cpp/pull/6015,
+    # With PR#6015 https://github.com/ggml-org/llama.cpp/pull/6015,
     # `default.metallib` may be compiled with Metal compiler from XCode
     # and we need to escape sandbox on MacOS to access Metal compiler.
     # `xcrun` is used find the path of the Metal compiler, which is varible
     # and not on $PATH
-    # see https://github.com/ggerganov/llama.cpp/pull/6118 for discussion
+    # see https://github.com/ggml-org/llama.cpp/pull/6118 for discussion
     __noChroot = effectiveStdenv.isDarwin && useMetalKit && precompileMetalShaders;

     nativeBuildInputs =
@@ -220,7 +220,7 @@ effectiveStdenv.mkDerivation (finalAttrs: {
       broken = (useMetalKit && !effectiveStdenv.isDarwin);

       description = "Inference of LLaMA model in pure C/C++${descriptionSuffix}";
-      homepage = "https://github.com/ggerganov/llama.cpp/";
+      homepage = "https://github.com/ggml-org/llama.cpp/";
       license = lib.licenses.mit;

       # Accommodates `nix run` and `lib.getExe`
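
The comment in the first hunk notes that xcrun is used to find the Metal compiler because its path is variable and not on $PATH. Illustratively (run on macOS with Xcode installed; not code from the repo), the same lookup from Python:

# Resolve the Metal compiler path the way the comment describes (illustration).
import subprocess

result = subprocess.run(
    ["xcrun", "--find", "metal"], capture_output=True, text=True, check=True
)
print(result.stdout.strip())  # prints the toolchain path of the metal binary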

.devops/rocm.Dockerfile (+1 -1)

@@ -11,7 +11,7 @@ ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-co
 FROM ${BASE_ROCM_DEV_CONTAINER} AS build

 # Unless otherwise specified, we make a fat build.
-# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
+# List from https://github.com/ggml-org/llama.cpp/pull/1087#issuecomment-1682807878
 # This is mostly tied to rocBLAS supported archs.
 # gfx803, gfx900, gfx1032, gfx1101, gfx1102,not officialy supported
 # gfx906 is deprecated

.github/ISSUE_TEMPLATE/020-enhancement.yml (+3 -3)

@@ -6,7 +6,7 @@ body:
   - type: markdown
     attributes:
       value: |
-        [Please post your idea first in Discussion if there is not yet a consensus for this enhancement request. This will help to keep this issue tracker focused on enhancements that the community has agreed needs to be implemented.](https://github.com/ggerganov/llama.cpp/discussions/categories/ideas)
+        [Please post your idea first in Discussion if there is not yet a consensus for this enhancement request. This will help to keep this issue tracker focused on enhancements that the community has agreed needs to be implemented.](https://github.com/ggml-org/llama.cpp/discussions/categories/ideas)

   - type: checkboxes
     id: prerequisites
@@ -16,11 +16,11 @@ body:
       options:
         - label: I am running the latest code. Mention the version if possible as well.
           required: true
-        - label: I carefully followed the [README.md](https://github.com/ggerganov/llama.cpp/blob/master/README.md).
+        - label: I carefully followed the [README.md](https://github.com/ggml-org/llama.cpp/blob/master/README.md).
           required: true
         - label: I searched using keywords relevant to my issue to make sure that I am creating a new issue that is not already open (or closed).
           required: true
-        - label: I reviewed the [Discussions](https://github.com/ggerganov/llama.cpp/discussions), and have a new and useful enhancement to share.
+        - label: I reviewed the [Discussions](https://github.com/ggml-org/llama.cpp/discussions), and have a new and useful enhancement to share.
           required: true

   - type: textarea

.github/ISSUE_TEMPLATE/030-research.yml (+1 -1)

@@ -6,7 +6,7 @@ body:
   - type: markdown
     attributes:
       value: |
-        Don't forget to check for any [duplicate research issue tickets](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3A%22research+%F0%9F%94%AC%22)
+        Don't forget to check for any [duplicate research issue tickets](https://github.com/ggml-org/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3A%22research+%F0%9F%94%AC%22)

   - type: checkboxes
     id: research-stage

.github/ISSUE_TEMPLATE/040-refactor.yml (+2 -2)

@@ -6,8 +6,8 @@ body:
   - type: markdown
     attributes:
       value: |
-        Don't forget to [check for existing refactor issue tickets](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3Arefactoring) in case it's already covered.
-        Also you may want to check [Pull request refactor label as well](https://github.com/ggerganov/llama.cpp/pulls?q=is%3Aopen+is%3Apr+label%3Arefactoring) for duplicates too.
+        Don't forget to [check for existing refactor issue tickets](https://github.com/ggml-org/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3Arefactoring) in case it's already covered.
+        Also you may want to check [Pull request refactor label as well](https://github.com/ggml-org/llama.cpp/pulls?q=is%3Aopen+is%3Apr+label%3Arefactoring) for duplicates too.

   - type: textarea
     id: background-description

.github/ISSUE_TEMPLATE/config.yml (+3 -3)

@@ -1,11 +1,11 @@
 blank_issues_enabled: true
 contact_links:
   - name: Got an idea?
-    url: https://github.com/ggerganov/llama.cpp/discussions/categories/ideas
+    url: https://github.com/ggml-org/llama.cpp/discussions/categories/ideas
     about: Pop it there. It may then become an enhancement ticket.
   - name: Got a question?
-    url: https://github.com/ggerganov/llama.cpp/discussions/categories/q-a
+    url: https://github.com/ggml-org/llama.cpp/discussions/categories/q-a
     about: Ask a question there!
   - name: Want to contribute?
-    url: https://github.com/ggerganov/llama.cpp/wiki/contribute
+    url: https://github.com/ggml-org/llama.cpp/wiki/contribute
     about: Head to the contribution guide page of the wiki for areas you can help with

.github/pull_request_template.md (+1 -1)

@@ -1 +1 @@
-*Make sure to read the [contributing guidelines](https://github.com/ggerganov/llama.cpp/blob/master/CONTRIBUTING.md) before submitting a PR*
+*Make sure to read the [contributing guidelines](https://github.com/ggml-org/llama.cpp/blob/master/CONTRIBUTING.md) before submitting a PR*
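
Nearly every hunk so far is the same mechanical edit: github.com/ggerganov/llama.cpp becomes github.com/ggml-org/llama.cpp, reflecting the repository's move to the ggml-org organization. A sketch of the kind of one-off script that could apply (or verify) such a rename across a checkout; this is hypothetical, not a script from the repo:

# rename_org.py (hypothetical one-off script, for illustration)
# Rewrites the old org URL to the new one in every text file under a tree.
from pathlib import Path

OLD = "github.com/ggerganov/llama.cpp"
NEW = "github.com/ggml-org/llama.cpp"

def rewrite_tree(root="."):
    """Replace OLD with NEW in all readable text files; return count changed."""
    changed = 0
    for path in Path(root).rglob("*"):
        if not path.is_file() or ".git" in path.parts:
            continue
        try:
            text = path.read_text(encoding="utf-8")
        except (UnicodeDecodeError, OSError):
            continue  # skip binary or unreadable files
        if OLD in text:
            path.write_text(text.replace(OLD, NEW), encoding="utf-8")
            changed += 1
    return changed

if __name__ == "__main__":
    print(f"{rewrite_tree()} files updated")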

.github/workflows/bench.yml.disabled (+1 -11)

@@ -1,5 +1,5 @@
 # TODO: there have been some issues with the workflow, so disabling for now
-# https://github.com/ggerganov/llama.cpp/issues/7893
+# https://github.com/ggml-org/llama.cpp/issues/7893
 #
 # Benchmark
 name: Benchmark
@@ -57,17 +57,7 @@ jobs:

     if: |
       inputs.gpu-series == 'Standard_NC4as_T4_v3'
-      || (
-        github.event_name == 'schedule'
-        && github.ref_name == 'master'
-        && github.repository_owner == 'ggerganov'
-      )
       || github.event_name == 'pull_request_target'
-      || (
-        github.event_name == 'push'
-        && github.event.ref == 'refs/heads/master'
-        && github.repository_owner == 'ggerganov'
-      )
     steps:
       - name: Clone
         id: checkout
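
The second hunk strips the scheduled and push triggers (both gated on github.repository_owner == 'ggerganov') from the job's if: guard. The surviving condition reduces to a two-clause predicate, shown here in illustrative Python with names mirroring the workflow context:

# Illustrative only: the simplified `if:` guard as a plain predicate.
def should_run_benchmark(gpu_series, event_name):
    # After this change the job runs only for the T4 GPU series
    # or for pull_request_target events.
    return (
        gpu_series == "Standard_NC4as_T4_v3"
        or event_name == "pull_request_target"
    )

assert should_run_benchmark("Standard_NC4as_T4_v3", "push")
assert should_run_benchmark("other", "pull_request_target")
assert not should_run_benchmark("other", "schedule")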
