mirror of https://github.com/alibaba/MNN.git
Compare commits
4 Commits
3526e7c55a ... 868faa65b9

Author | SHA1 | Date |
---|---|---|
| 868faa65b9 | |
| f9720f994e | |
| 7a08d2e2ee | |
| a7e97ff798 | |

@@ -174,7 +174,7 @@ class ChatInputComponent(
         } else if (currentUserMessage != null) {
             visible = false
         } else if (!TextUtils.isEmpty(editUserMessage.text.toString())) {
-            visible = false
+            //visible = false
         }
         buttonSwitchVoice!!.visibility =
             if (visible) View.VISIBLE else View.GONE

@@ -227,13 +227,13 @@ class ChatInputComponent(
         voiceRecordingModule.setOnVoiceRecordingListener(object : VoiceRecordingListener {
             override fun onEnterRecordingMode() {
                 updateAudioOutput()
-                binding.btnToggleThinking.visibility = View.GONE
-                editUserMessage.visibility = View.GONE
+                // binding.btnToggleThinking.visibility = View.GONE
+                // editUserMessage.visibility = View.GONE
                 KeyboardUtils.hideKeyboard(editUserMessage)
                 if (attachmentPickerModule != null) {
                     attachmentPickerModule!!.hideAttachmentLayout()
                 }
-                editUserMessage.visibility = View.GONE
+                // editUserMessage.visibility = View.GONE
             }

             override fun onLeaveRecordingMode() {

@@ -250,10 +250,11 @@ class ChatInputComponent(
             override fun onRecordSuccess(duration: Float, recordingFilePath: String?) {
                 val chatDataItem = ChatDataItem.createAudioInputData(
                     chatActivity.dateFormat!!.format(Date()),
-                    "",
+                    editUserMessage.text.toString().trim { it <= ' ' },
                     recordingFilePath!!,
                     duration
                 )
+                editUserMessage.setText("")
                 this@ChatInputComponent.onSendMessage?.let { it(chatDataItem) }
             }

@@ -118,14 +118,16 @@
             android:padding="12dp"
             android:textColor="?attr/colorOnSurface"
             android:textSize="16sp"/>
-        <RelativeLayout
-            android:layout_width="wrap_content"
-            android:layout_height="50dp">
+        <LinearLayout
+            android:layout_width="match_parent"
+            android:orientation="vertical"
+            android:layout_height="100dp">
             <RelativeLayout
                 android:id="@+id/btn_voice_recording"
                 android:layout_width="match_parent"
-                android:layout_height="match_parent"
-                android:visibility="gone"
+                android:layout_height="50dp"
+                android:visibility="visible"
+                tools:visibility="visible"
                 >
                 <TextView
                     android:layout_width="wrap_content"

@@ -208,7 +210,7 @@
                         android:visibility="gone" />
                 </FrameLayout>

-        </RelativeLayout>
+        </LinearLayout>
         <LinearLayout
             android:id="@+id/layout_more_menu"
             android:layout_width="match_parent"

@@ -255,6 +257,7 @@
                     android:visibility="gone"
                     tools:visibility="visible"
                     android:textColor="?colorOnSurface"
+                    android:background="@color/semi_transparent_gray"
                     tools:text="@string/release_to_send" />

                 <RelativeLayout

@@ -28,7 +28,7 @@
     <color name="color_on_surface_variant">#666666</color>

     <color name="color_disabled">#888888</color>

-
+    <color name="semi_transparent_gray">#80888888</color>

 </resources>

@@ -133,11 +133,11 @@ namespace MNN {
     void CoreMLBackend::onResizeBegin() {
         mCoreMLLayerPtrs.clear();
     }
-    int CoreMLBackend::getBytes(const halide_type_t& type) {
+    size_t CoreMLBackend::getBytes(const halide_type_t& type) {
         if (type.code == halide_type_float && mPrecision == BackendConfig::Precision_Low) {
             return 1;
         }
-        return type.bytes();
+        return static_cast<size_t>(type.bytes());
     }

     ErrorCode CoreMLBackend::onResizeEnd() {

@@ -107,7 +107,7 @@ namespace MNN {
     void setLayerOutputs(CoreML__Specification__NeuralNetworkLayer* layer, std::vector<std::string>&& outputs);
     void copyName(char** ptr, std::string&& name);
     int getInOutTensorInfo(std::string modelName);
-    int getBytes(const halide_type_t& type);
+    size_t getBytes(const halide_type_t& type);

     class Creator {
     public:
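
Note: the two CoreMLBackend hunks above widen getBytes from int to size_t, so a tensor's byte width composes with size_t element counts without implicit sign conversions. A minimal standalone sketch of the motivation, using stand-in names rather than MNN's real API:

#include <cstddef>
#include <cstdio>

// Stand-in for a getBytes-style accessor; returning size_t keeps buffer
// arithmetic unsigned end to end, matching sizeof and container sizes.
static size_t bytesPerElement(bool lowPrecisionFloat) {
    return lowPrecisionFloat ? 1 : 4;  // mirrors the Precision_Low branch above
}

int main() {
    size_t elementCount = 1 << 20;
    // size_t * size_t stays size_t: no -Wsign-compare or -Wsign-conversion
    // noise when this feeds allocator sizes or memcpy lengths.
    size_t bufferBytes = elementCount * bytesPerElement(false);
    printf("%zu bytes\n", bufferBytes);
    return 0;
}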

@@ -606,7 +606,7 @@ static OpType _getRealOpType(OpType opType) {
     }
 }
 void* CPUBackend::onMapTensor(Tensor::MapType mtype, Tensor::DimensionType dtype, const Tensor* srcTensor) {
-    if (getBytes(this, srcTensor) != srcTensor->getType().bytes()) {
+    if (static_cast<int>(getBytes(this, srcTensor)) != srcTensor->getType().bytes()) {
         return nullptr;
     }
     if (OpCommonUtils:: convertDimType(TensorUtils::getDescribe(srcTensor)->dimensionFormat) != dtype) {

@@ -617,7 +617,7 @@ void* CPUBackend::onMapTensor(Tensor::MapType mtype, Tensor::DimensionType dtype
 }

 bool CPUBackend::onUnmapTensor(Tensor::MapType mtype, Tensor::DimensionType dtype, const Tensor* dstTensor, void* mapPtr) {
-    if (getBytes(this, dstTensor) != dstTensor->getType().bytes()) {
+    if (static_cast<int>(getBytes(this, dstTensor)) != dstTensor->getType().bytes()) {
         return false;
     }
     if (OpCommonUtils:: convertDimType(TensorUtils::getDescribe(dstTensor)->dimensionFormat) != dtype) {
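
Note: with getBytes now returning size_t, the checks in onMapTensor/onUnmapTensor would compare unsigned against the tensor type's signed bytes(); the hunks above narrow the size_t side explicitly. A hedged sketch of the pattern with stand-in functions, not the real MNN signatures:

#include <cstddef>

// Stand-ins for illustration: an unsigned byte width from the backend and
// a signed byte width from the tensor's element type.
static size_t backendBytes() { return 2; }  // e.g. fp16 storage
static int typeBytes() { return 4; }        // e.g. 32-bit float element

static bool widthsDiffer() {
    // Casting the size_t side keeps the comparison signed-vs-signed and
    // silences -Wsign-compare; byte widths are tiny, so narrowing is safe.
    return static_cast<int>(backendBytes()) != typeBytes();
}

int main() { return widthsDiffer() ? 0 : 1; }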

@@ -651,8 +651,8 @@ size_t CPUBackend::getTensorSize(const Tensor* tensor, bool multiBytes) const {
     return dataSize;
 }

-int CPUBackend::getBytes(const Backend* backend, const Tensor* output) {
-    auto bytes = output->getType().bytes();
+size_t CPUBackend::getBytes(const Backend* backend, const Tensor* output) {
+    size_t bytes = output->getType().bytes();
     auto core = static_cast<const CPUBackend*>(backend)->functions();
     auto quant = TensorUtils::getDescribe(output)->quantAttr.get();
     if (output->getType().code == halide_type_float) {

@@ -175,7 +175,7 @@ public:
     inline int taskIndex() const {return mRuntime->mTaskIndex;}
 #endif
     static void initCreatorMap();
-    static int getBytes(const Backend* backend, const Tensor* output);
+    static size_t getBytes(const Backend* backend, const Tensor* output);
     static DataType getDataType(const Tensor* tensor);
     friend class CPURuntime;
     void enqueueTask(std::function<int()>&& task);

@@ -1048,7 +1048,7 @@ ErrorCode DenseConvInt8TiledExecutor::onExecute(const std::vector<Tensor*>& inpu
     const auto kernelCountUnit = mIm2ColParamter.kernelCountUnit;
     const auto unitColBufferSize = kernelCountUnit * DST_XUNIT * SRC_UNIT * sizeof(int8_t);
     const auto colBufferSize = unitColBufferSize * mIm2ColCount;
-    const int dstBytes = static_cast<CPUBackend*>(backend())->getBytes(backend(), output);
+    const auto dstBytes = static_cast<CPUBackend*>(backend())->getBytes(backend(), output);
     const int blockL = kernelCountUnit / mBlockNum; // source depthQuad for each block.
     const int kxky = mIm2ColParamter.kernelX * mIm2ColParamter.kernelY;
     const int blocklu = blockL / kxky; // UP_DIV(ic,src_unit) per block
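
Note: switching dstBytes from const int to const auto lets this call site follow CPUBackend::getBytes's new size_t return type with no narrowing at the declaration. A small sketch of the deduction, with a hypothetical stand-in for the accessor:

#include <cstddef>
#include <type_traits>

static size_t getBytesStandIn() { return 4; }  // models the new signature

int main() {
    const auto dstBytes = getBytesStandIn();   // deduced as const size_t
    static_assert(std::is_same<decltype(dstBytes), const size_t>::value,
                  "auto follows the callee's return type");
    return dstBytes == 4 ? 0 : 1;
}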

@@ -169,8 +169,8 @@ private:
     BufferAllocator* mAllocator;
     MemChunk mPoint;
 };
-int CUDABackend::getBytes(const Tensor* tensor) const {
-    auto bytes = tensor->getType().bytes();
+size_t CUDABackend::getBytes(const Tensor* tensor) const {
+    size_t bytes = tensor->getType().bytes();
     if (mPrecision == 2 || mPrecision == 3) {// Fp16 or Bf16
         if (halide_type_float == tensor->getType().code) {
             bytes = 2;
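
Note: the CUDABackend hunk widens the return type while keeping the precision logic: modes 2 and 3 (fp16/bf16, per the comment) store float tensors at 2 bytes per element. A self-contained sketch of that branch with illustrative constants, not the real backend:

#include <cstddef>

// Models the branch above: float elements shrink to 2 bytes under fp16
// (mode 2) or bf16 (mode 3); other cases keep the type's natural width.
static size_t bytesFor(int precisionMode, bool isFloat, size_t typeBytes) {
    if ((precisionMode == 2 || precisionMode == 3) && isFloat) {
        return 2;
    }
    return typeBytes;
}

int main() {
    bool ok = bytesFor(2, true, 4) == 2    // fp16: float32 stored as 2 bytes
           && bytesFor(1, true, 4) == 4;   // full precision: unchanged
    return ok ? 0 : 1;
}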

@@ -89,7 +89,7 @@ public:
         return mStaticBufferPool.get();
     }
     static size_t realSize(const Tensor *tensor);
-    int getBytes(const Tensor* tensor) const;
+    size_t getBytes(const Tensor* tensor) const;
     CPUResizeCache* getCache();
     bool useFp16() const;
     int getPrecision() const;

@@ -427,7 +427,6 @@ ErrorCode ConvWinogradExecution::onExecute(const std::vector<Tensor*> &inputs, c
     int co_pack = UP_DIV(mResource->mKernelInfo.kernelN, PACK_NUMBER) * PACK_NUMBER;
     int ci_pack = UP_DIV(mResource->mKernelInfo.kernelC, PACK_NUMBER) * PACK_NUMBER;

-    auto bytes = static_cast<CUDABackend*>(backend())->getBytes(input);
     const void *input_addr = (const void*)input->deviceId();
     const void *mGgGt_Buffer = mResource->mFilter;
     const void *bias_addr = mResource->mBias;

@@ -224,7 +224,6 @@ MatMulExecution::~ MatMulExecution() {

 void MatMulExecution::setArguments(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
     auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
-    auto bytes = static_cast<CUDABackend*>(backend())->getBytes(inputs[0]);
     auto pool = static_cast<CUDABackend*>(backend())->getBufferPool();

     const Tensor* A = inputs[0];

@@ -971,7 +970,6 @@ void MatMulExecution::setArguments(const std::vector<Tensor *> &inputs, const st

 ErrorCode MatMulExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
     auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
-    auto bytes = static_cast<CUDABackend*>(backend())->getBytes(inputs[0]);

     const Tensor* A = inputs[0];
     const Tensor* B = inputs[1];

@@ -1060,7 +1058,6 @@ ErrorCode MatMulExecution::onResize(const std::vector<Tensor *> &inputs, const s
 }

 ErrorCode MatMulExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
-    auto bytes = static_cast<CUDABackend*>(backend())->getBytes(inputs[0]);
     auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
     bool hAlignment = (mGemmInfo.elhPad[2] == mGemmInfo.elh[2]);
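
Note: the ConvWinogradExecution and MatMulExecution hunks delete a bytes local that appears to have no remaining reader; under -Wunused-variable (or -Werror builds) such leftovers break clean compiles. A trivial sketch of the cleanup:

// A value computed but never consumed triggers -Wunused-variable; deleting
// the declaration, as the hunks above do, is the right fix when the
// initializer has no side effects worth keeping.
static int bytesOf(int elements) { return elements * 4; }

int main() {
    // int bytes = bytesOf(8);  // removed: never read afterwards
    return bytesOf(8) == 32 ? 0 : 1;  // compute the value where it is used
}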